Lines matching "aes" and "cmac" in drivers/crypto/caam/caamhash.c
1 // SPDX-License-Identifier: GPL-2.0+
3 * caam - Freescale FSL CAAM support for ahash functions of crypto API
6 * Copyright 2018-2019, 2023 NXP
13 * (ASCII diagram, partially elided in this listing: JobDesc #1, the
 * first/digest job, points to a ShareDesc containing the hashKey and
 * operation commands; subsequent JobDescs #2, #3 and #4 all point to
 * that same shared ShareDesc)
46 * (a second boxed diagram follows in the source; its body is elided
 * in this listing)
70 #include <linux/dma-mapping.h>
94 /* ahash per-session context */
145 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
150 state->ctx_dma_len = ctx_len; in map_seq_out_ptr_ctx()
151 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, in map_seq_out_ptr_ctx()
153 if (dma_mapping_error(jrdev, state->ctx_dma)) { in map_seq_out_ptr_ctx()
155 state->ctx_dma = 0; in map_seq_out_ptr_ctx()
156 return -ENOMEM; in map_seq_out_ptr_ctx()
159 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); in map_seq_out_ptr_ctx()
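Every mapping helper in this file follows the same DMA API idiom: map, validate with dma_mapping_error(), and zero the cached handle on failure so the unmap paths later in the file (ahash_unmap/ahash_unmap_ctx) can safely test it. A minimal self-contained sketch of that pattern; struct my_state is a hypothetical stand-in for the driver's caam_hash_state:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct my_state {			/* hypothetical, mirrors caam_hash_state */
	u8 caam_ctx[64];
	dma_addr_t ctx_dma;
	int ctx_dma_len;
};

static int example_map_ctx(struct device *dev, struct my_state *state,
			   int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, state->ctx_dma)) {
		dev_err(dev, "unable to map ctx\n");
		state->ctx_dma = 0;	/* lets teardown skip the unmap */
		return -ENOMEM;
	}
	return 0;
}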
169 int buflen = state->buflen; in buf_map_to_sec4_sg()
174 state->buf_dma = dma_map_single(jrdev, state->buf, buflen, in buf_map_to_sec4_sg()
176 if (dma_mapping_error(jrdev, state->buf_dma)) { in buf_map_to_sec4_sg()
178 state->buf_dma = 0; in buf_map_to_sec4_sg()
179 return -ENOMEM; in buf_map_to_sec4_sg()
182 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); in buf_map_to_sec4_sg()
187 /* Map state->caam_ctx, and add it to link table */
192 state->ctx_dma_len = ctx_len; in ctx_map_to_sec4_sg()
193 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); in ctx_map_to_sec4_sg()
194 if (dma_mapping_error(jrdev, state->ctx_dma)) { in ctx_map_to_sec4_sg()
196 state->ctx_dma = 0; in ctx_map_to_sec4_sg()
197 return -ENOMEM; in ctx_map_to_sec4_sg()
200 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); in ctx_map_to_sec4_sg()
209 struct device *jrdev = ctx->jrdev; in ahash_set_sh_desc()
210 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); in ahash_set_sh_desc()
213 ctx->adata.key_virt = ctx->key; in ahash_set_sh_desc()
216 desc = ctx->sh_desc_update; in ahash_set_sh_desc()
217 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, in ahash_set_sh_desc()
218 ctx->ctx_len, true, ctrlpriv->era); in ahash_set_sh_desc()
219 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, in ahash_set_sh_desc()
220 desc_bytes(desc), ctx->dir); in ahash_set_sh_desc()
227 desc = ctx->sh_desc_update_first; in ahash_set_sh_desc()
228 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, in ahash_set_sh_desc()
229 ctx->ctx_len, false, ctrlpriv->era); in ahash_set_sh_desc()
230 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, in ahash_set_sh_desc()
231 desc_bytes(desc), ctx->dir); in ahash_set_sh_desc()
237 desc = ctx->sh_desc_fin; in ahash_set_sh_desc()
238 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, in ahash_set_sh_desc()
239 ctx->ctx_len, true, ctrlpriv->era); in ahash_set_sh_desc()
240 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, in ahash_set_sh_desc()
241 desc_bytes(desc), ctx->dir); in ahash_set_sh_desc()
248 desc = ctx->sh_desc_digest; in ahash_set_sh_desc()
249 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, in ahash_set_sh_desc()
250 ctx->ctx_len, false, ctrlpriv->era); in ahash_set_sh_desc()
251 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, in ahash_set_sh_desc()
252 desc_bytes(desc), ctx->dir); in ahash_set_sh_desc()
265 struct device *jrdev = ctx->jrdev; in axcbc_set_sh_desc()
269 desc = ctx->sh_desc_update; in axcbc_set_sh_desc()
270 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, in axcbc_set_sh_desc()
271 ctx->ctx_len, ctx->ctx_len); in axcbc_set_sh_desc()
272 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, in axcbc_set_sh_desc()
273 desc_bytes(desc), ctx->dir); in axcbc_set_sh_desc()
279 desc = ctx->sh_desc_fin; in axcbc_set_sh_desc()
280 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, in axcbc_set_sh_desc()
281 digestsize, ctx->ctx_len); in axcbc_set_sh_desc()
282 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, in axcbc_set_sh_desc()
283 desc_bytes(desc), ctx->dir); in axcbc_set_sh_desc()
289 ctx->adata.key_virt = ctx->key; in axcbc_set_sh_desc()
292 desc = ctx->sh_desc_update_first; in axcbc_set_sh_desc()
293 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, in axcbc_set_sh_desc()
294 ctx->ctx_len); in axcbc_set_sh_desc()
295 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, in axcbc_set_sh_desc()
296 desc_bytes(desc), ctx->dir); in axcbc_set_sh_desc()
302 desc = ctx->sh_desc_digest; in axcbc_set_sh_desc()
303 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, in axcbc_set_sh_desc()
304 digestsize, ctx->ctx_len); in axcbc_set_sh_desc()
305 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, in axcbc_set_sh_desc()
306 desc_bytes(desc), ctx->dir); in axcbc_set_sh_desc()
317 struct device *jrdev = ctx->jrdev; in acmac_set_sh_desc()
321 desc = ctx->sh_desc_update; in acmac_set_sh_desc()
322 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, in acmac_set_sh_desc()
323 ctx->ctx_len, ctx->ctx_len); in acmac_set_sh_desc()
324 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, in acmac_set_sh_desc()
325 desc_bytes(desc), ctx->dir); in acmac_set_sh_desc()
331 desc = ctx->sh_desc_fin; in acmac_set_sh_desc()
332 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, in acmac_set_sh_desc()
333 digestsize, ctx->ctx_len); in acmac_set_sh_desc()
334 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, in acmac_set_sh_desc()
335 desc_bytes(desc), ctx->dir); in acmac_set_sh_desc()
341 desc = ctx->sh_desc_update_first; in acmac_set_sh_desc()
342 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, in acmac_set_sh_desc()
343 ctx->ctx_len); in acmac_set_sh_desc()
344 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, in acmac_set_sh_desc()
345 desc_bytes(desc), ctx->dir); in acmac_set_sh_desc()
351 desc = ctx->sh_desc_digest; in acmac_set_sh_desc()
352 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, in acmac_set_sh_desc()
353 digestsize, ctx->ctx_len); in acmac_set_sh_desc()
354 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, in acmac_set_sh_desc()
355 desc_bytes(desc), ctx->dir); in acmac_set_sh_desc()
367 struct device *jrdev = ctx->jrdev; in hash_digest_key()
375 return -ENOMEM; in hash_digest_key()
383 return -ENOMEM; in hash_digest_key()
387 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | in hash_digest_key()
406 if (ret == -EINPROGRESS) { in hash_digest_key()
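Context for the hash_digest_key() fragment above: per RFC 2104, an HMAC key longer than the block size is first replaced by its digest. hash_digest_key() runs that one-shot hash on the CAAM itself; the elided part of the append_operation() call at source line 387 selects the INITFINAL (one-shot) algorithm state, and ahash_setkey() below invokes this helper before deriving the split key.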
428 struct device *jrdev = ctx->jrdev; in ahash_setkey()
429 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); in ahash_setkey()
431 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); in ahash_setkey()
442 return -EOVERFLOW; in ahash_setkey()
446 return -ENOMEM; in ahash_setkey()
457 if (ctrlpriv->era >= 6) { in ahash_setkey()
458 ctx->adata.key_inline = true; in ahash_setkey()
459 ctx->adata.keylen = keylen; in ahash_setkey()
460 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & in ahash_setkey()
463 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) in ahash_setkey()
466 memcpy(ctx->key, key, keylen); in ahash_setkey()
474 if (keylen > ctx->adata.keylen_pad) in ahash_setkey()
475 dma_sync_single_for_device(ctx->jrdev, in ahash_setkey()
476 ctx->adata.key_dma, in ahash_setkey()
477 ctx->adata.keylen_pad, in ahash_setkey()
480 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, in ahash_setkey()
490 return -EINVAL; in ahash_setkey()
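A hedged caller-side sketch of how the setkey path above is reached through the generic kernel crypto API; example_hmac_setkey() and its error handling are illustrative, not part of this driver:

#include <crypto/hash.h>
#include <linux/err.h>

static int example_hmac_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * On CAAM era >= 6 the raw key stays inline in the shared
	 * descriptors; older eras derive an MDHA split key first
	 * (gen_split_key() in the fragment above).
	 */
	ret = crypto_ahash_setkey(tfm, key, keylen);

	crypto_free_ahash(tfm);
	return ret;
}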
497 struct device *jrdev = ctx->jrdev; in axcbc_setkey()
500 return -EINVAL; in axcbc_setkey()
502 memcpy(ctx->key, key, keylen); in axcbc_setkey()
503 dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, in axcbc_setkey()
505 ctx->adata.keylen = keylen; in axcbc_setkey()
508 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); in axcbc_setkey()
523 /* key is immediate data for all cmac shared descriptors */ in acmac_setkey()
524 ctx->adata.key_virt = key; in acmac_setkey()
525 ctx->adata.keylen = keylen; in acmac_setkey()
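The start of acmac_setkey() is elided in this listing; in current kernels the AES key size is validated before the lines above, for which aes_check_keylen() from <crypto/aes.h> (accepting 16-, 24- or 32-byte keys) is the standard helper. A hedged sketch of that check:

	int err = aes_check_keylen(keylen);	/* rejects anything but 16/24/32 */

	if (err)
		return err;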
534 * ahash_edesc - s/w-extended ahash descriptor
557 if (edesc->src_nents) in ahash_unmap()
558 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); in ahash_unmap()
560 if (edesc->sec4_sg_bytes) in ahash_unmap()
561 dma_unmap_single(dev, edesc->sec4_sg_dma, in ahash_unmap()
562 edesc->sec4_sg_bytes, DMA_TO_DEVICE); in ahash_unmap()
564 if (state->buf_dma) { in ahash_unmap()
565 dma_unmap_single(dev, state->buf_dma, state->buflen, in ahash_unmap()
567 state->buf_dma = 0; in ahash_unmap()
577 if (state->ctx_dma) { in ahash_unmap_ctx()
578 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); in ahash_unmap_ctx()
579 state->ctx_dma = 0; in ahash_unmap_ctx()
599 edesc = state->edesc; in ahash_done_cpy()
600 has_bklog = edesc->bklog; in ahash_done_cpy()
606 memcpy(req->result, state->caam_ctx, digestsize); in ahash_done_cpy()
610 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_cpy()
611 ctx->ctx_len, 1); in ahash_done_cpy()
620 crypto_finalize_hash_request(jrp->engine, req, ecode); in ahash_done_cpy()
650 edesc = state->edesc; in ahash_done_switch()
651 has_bklog = edesc->bklog; in ahash_done_switch()
655 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); in ahash_done_switch()
658 scatterwalk_map_and_copy(state->buf, req->src, in ahash_done_switch()
659 req->nbytes - state->next_buflen, in ahash_done_switch()
660 state->next_buflen, 0); in ahash_done_switch()
661 state->buflen = state->next_buflen; in ahash_done_switch()
664 DUMP_PREFIX_ADDRESS, 16, 4, state->buf, in ahash_done_switch()
665 state->buflen, 1); in ahash_done_switch()
668 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_switch()
669 ctx->ctx_len, 1); in ahash_done_switch()
670 if (req->result) in ahash_done_switch()
672 DUMP_PREFIX_ADDRESS, 16, 4, req->result, in ahash_done_switch()
682 crypto_finalize_hash_request(jrp->engine, req, ecode); in ahash_done_switch()
707 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_edesc_alloc()
716 state->edesc = edesc; in ahash_edesc_alloc()
718 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), in ahash_edesc_alloc()
734 struct sec4_sg_entry *sg = edesc->sec4_sg; in ahash_edesc_add_src()
738 sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); in ahash_edesc_add_src()
740 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); in ahash_edesc_add_src()
741 if (dma_mapping_error(ctx->jrdev, src_dma)) { in ahash_edesc_add_src()
742 dev_err(ctx->jrdev, "unable to map S/G table\n"); in ahash_edesc_add_src()
743 return -ENOMEM; in ahash_edesc_add_src()
746 edesc->sec4_sg_bytes = sgsize; in ahash_edesc_add_src()
747 edesc->sec4_sg_dma = src_dma; in ahash_edesc_add_src()
750 src_dma = sg_dma_address(req->src); in ahash_edesc_add_src()
754 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, in ahash_edesc_add_src()
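Decision sketch for ahash_edesc_add_src(), reconstructed from the visible fragments: a single mapped segment is handed to the CAAM by physical address, while multiple segments go through a sec4 S/G table whose use is flagged with LDST_SGF in the SEQ IN PTR command (the elided options argument above):

	if (nents > 1 || first_sg) {
		/* build the sec4 S/G table, map it, options = LDST_SGF */
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}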
765 struct device *jrdev = ctx->jrdev; in ahash_do_one_req()
766 u32 *desc = state->edesc->hw_desc; in ahash_do_one_req()
769 state->edesc->bklog = true; in ahash_do_one_req()
771 ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req); in ahash_do_one_req()
773 if (ret == -ENOSPC && engine->retry_support) in ahash_do_one_req()
776 if (ret != -EINPROGRESS) { in ahash_do_one_req()
777 ahash_unmap(jrdev, state->edesc, req, 0); in ahash_do_one_req()
778 kfree(state->edesc); in ahash_do_one_req()
794 struct ahash_edesc *edesc = state->edesc; in ahash_enqueue_req()
795 u32 *desc = edesc->hw_desc; in ahash_enqueue_req()
798 state->ahash_op_done = cbk; in ahash_enqueue_req()
801 * Only the backlog requests are sent to crypto-engine since the others in ahash_enqueue_req()
803 * entries (more than the 10 entries from crypto-engine). in ahash_enqueue_req()
805 if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) in ahash_enqueue_req()
806 ret = crypto_transfer_hash_request_to_engine(jrpriv->engine, in ahash_enqueue_req()
811 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { in ahash_enqueue_req()
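Caller-side sketch: the CRYPTO_TFM_REQ_MAY_BACKLOG flag set on a request is what routes it into the crypto_transfer_hash_request_to_engine() branch above; anything else is enqueued directly on the job ring. my_completion_cb and my_ctx are hypothetical:

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
				   my_completion_cb, my_ctx);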
825 struct device *jrdev = ctx->jrdev; in ahash_update_ctx()
826 u8 *buf = state->buf; in ahash_update_ctx()
827 int *buflen = &state->buflen; in ahash_update_ctx()
828 int *next_buflen = &state->next_buflen; in ahash_update_ctx()
830 int in_len = *buflen + req->nbytes, to_hash; in ahash_update_ctx()
836 *next_buflen = in_len & (blocksize - 1); in ahash_update_ctx()
837 to_hash = in_len - *next_buflen; in ahash_update_ctx()
840 * For XCBC and CMAC, if to_hash is a multiple of the block size, in ahash_update_ctx()
843 if ((is_xcbc_aes(ctx->adata.algtype) || in ahash_update_ctx()
844 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && in ahash_update_ctx()
847 to_hash -= blocksize; in ahash_update_ctx()
852 int src_len = req->nbytes - *next_buflen; in ahash_update_ctx()
854 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_ctx()
861 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, in ahash_update_ctx()
865 return -ENOMEM; in ahash_update_ctx()
879 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update, in ahash_update_ctx()
880 ctx->sh_desc_update_dma); in ahash_update_ctx()
882 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); in ahash_update_ctx()
883 return -ENOMEM; in ahash_update_ctx()
886 edesc->src_nents = src_nents; in ahash_update_ctx()
887 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_ctx()
889 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, in ahash_update_ctx()
890 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_update_ctx()
894 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_update_ctx()
899 sg_to_sec4_sg_last(req->src, src_len, in ahash_update_ctx()
900 edesc->sec4_sg + sec4_sg_src_index, in ahash_update_ctx()
903 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - in ahash_update_ctx()
906 desc = edesc->hw_desc; in ahash_update_ctx()
908 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_ctx()
911 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_ctx()
913 ret = -ENOMEM; in ahash_update_ctx()
917 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + in ahash_update_ctx()
920 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); in ahash_update_ctx()
927 ctx->ctx_len, DMA_BIDIRECTIONAL); in ahash_update_ctx()
929 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, in ahash_update_ctx()
930 req->nbytes, 0); in ahash_update_ctx()
940 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); in ahash_update_ctx()
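A worked example of the chunking math above, taking blocksize = 16 (AES):

/*
 * buflen = 10 carried over, req->nbytes = 38  =>  in_len = 48
 * next_buflen = 48 & 15 = 0, to_hash = 48
 *
 * For SHA-family hashes all 48 bytes go to the CAAM now. For
 * AES-XCBC/CMAC the exact-multiple case holds one block back
 * (next_buflen = 16, to_hash = 32): the hardware must receive the
 * final block in the FINALIZE job, never in an UPDATE job.
 */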
950 struct device *jrdev = ctx->jrdev; in ahash_final_ctx()
951 int buflen = state->buflen; in ahash_final_ctx()
962 edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, in ahash_final_ctx()
963 ctx->sh_desc_fin_dma); in ahash_final_ctx()
965 return -ENOMEM; in ahash_final_ctx()
967 desc = edesc->hw_desc; in ahash_final_ctx()
969 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_final_ctx()
971 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, in ahash_final_ctx()
972 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_final_ctx()
976 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_final_ctx()
980 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); in ahash_final_ctx()
982 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_final_ctx()
984 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_final_ctx()
986 ret = -ENOMEM; in ahash_final_ctx()
990 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, in ahash_final_ctx()
992 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); in ahash_final_ctx()
1011 struct device *jrdev = ctx->jrdev; in ahash_finup_ctx()
1012 int buflen = state->buflen; in ahash_finup_ctx()
1020 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_finup_ctx()
1027 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, in ahash_finup_ctx()
1031 return -ENOMEM; in ahash_finup_ctx()
1041 ctx->sh_desc_fin, ctx->sh_desc_fin_dma); in ahash_finup_ctx()
1043 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); in ahash_finup_ctx()
1044 return -ENOMEM; in ahash_finup_ctx()
1047 desc = edesc->hw_desc; in ahash_finup_ctx()
1049 edesc->src_nents = src_nents; in ahash_finup_ctx()
1051 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, in ahash_finup_ctx()
1052 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
1056 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_finup_ctx()
1061 sec4_sg_src_index, ctx->ctx_len + buflen, in ahash_finup_ctx()
1062 req->nbytes); in ahash_finup_ctx()
1066 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); in ahash_finup_ctx()
1085 struct device *jrdev = ctx->jrdev; in ahash_digest()
1092 state->buf_dma = 0; in ahash_digest()
1094 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_digest()
1101 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, in ahash_digest()
1105 return -ENOMEM; in ahash_digest()
1113 ctx->sh_desc_digest, ctx->sh_desc_digest_dma); in ahash_digest()
1115 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); in ahash_digest()
1116 return -ENOMEM; in ahash_digest()
1119 edesc->src_nents = src_nents; in ahash_digest()
1122 req->nbytes); in ahash_digest()
1129 desc = edesc->hw_desc; in ahash_digest()
1135 return -ENOMEM; in ahash_digest()
1152 struct device *jrdev = ctx->jrdev; in ahash_final_no_ctx()
1153 u8 *buf = state->buf; in ahash_final_no_ctx()
1154 int buflen = state->buflen; in ahash_final_no_ctx()
1161 edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, in ahash_final_no_ctx()
1162 ctx->sh_desc_digest_dma); in ahash_final_no_ctx()
1164 return -ENOMEM; in ahash_final_no_ctx()
1166 desc = edesc->hw_desc; in ahash_final_no_ctx()
1169 state->buf_dma = dma_map_single(jrdev, buf, buflen, in ahash_final_no_ctx()
1171 if (dma_mapping_error(jrdev, state->buf_dma)) { in ahash_final_no_ctx()
1176 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); in ahash_final_no_ctx()
1192 return -ENOMEM; in ahash_final_no_ctx()
1201 struct device *jrdev = ctx->jrdev; in ahash_update_no_ctx()
1202 u8 *buf = state->buf; in ahash_update_no_ctx()
1203 int *buflen = &state->buflen; in ahash_update_no_ctx()
1204 int *next_buflen = &state->next_buflen; in ahash_update_no_ctx()
1206 int in_len = *buflen + req->nbytes, to_hash; in ahash_update_no_ctx()
1212 *next_buflen = in_len & (blocksize - 1); in ahash_update_no_ctx()
1213 to_hash = in_len - *next_buflen; in ahash_update_no_ctx()
1216 * For XCBC and CMAC, if to_hash is a multiple of the block size, in ahash_update_no_ctx()
1219 if ((is_xcbc_aes(ctx->adata.algtype) || in ahash_update_no_ctx()
1220 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && in ahash_update_no_ctx()
1223 to_hash -= blocksize; in ahash_update_no_ctx()
1228 int src_len = req->nbytes - *next_buflen; in ahash_update_no_ctx()
1230 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_no_ctx()
1237 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, in ahash_update_no_ctx()
1241 return -ENOMEM; in ahash_update_no_ctx()
1255 ctx->sh_desc_update_first, in ahash_update_no_ctx()
1256 ctx->sh_desc_update_first_dma); in ahash_update_no_ctx()
1258 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); in ahash_update_no_ctx()
1259 return -ENOMEM; in ahash_update_no_ctx()
1262 edesc->src_nents = src_nents; in ahash_update_no_ctx()
1263 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_no_ctx()
1265 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_update_no_ctx()
1269 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); in ahash_update_no_ctx()
1271 desc = edesc->hw_desc; in ahash_update_no_ctx()
1273 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1276 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_no_ctx()
1278 ret = -ENOMEM; in ahash_update_no_ctx()
1282 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); in ahash_update_no_ctx()
1284 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); in ahash_update_no_ctx()
1293 ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_no_ctx()
1294 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) in ahash_update_no_ctx()
1296 state->update = ahash_update_ctx; in ahash_update_no_ctx()
1297 state->finup = ahash_finup_ctx; in ahash_update_no_ctx()
1298 state->final = ahash_final_ctx; in ahash_update_no_ctx()
1300 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, in ahash_update_no_ctx()
1301 req->nbytes, 0); in ahash_update_no_ctx()
1311 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_no_ctx()
1322 struct device *jrdev = ctx->jrdev; in ahash_finup_no_ctx()
1323 int buflen = state->buflen; in ahash_finup_no_ctx()
1330 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_finup_no_ctx()
1337 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, in ahash_finup_no_ctx()
1341 return -ENOMEM; in ahash_finup_no_ctx()
1353 ctx->sh_desc_digest, ctx->sh_desc_digest_dma); in ahash_finup_no_ctx()
1355 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); in ahash_finup_no_ctx()
1356 return -ENOMEM; in ahash_finup_no_ctx()
1359 desc = edesc->hw_desc; in ahash_finup_no_ctx()
1361 edesc->src_nents = src_nents; in ahash_finup_no_ctx()
1362 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_finup_no_ctx()
1364 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_finup_no_ctx()
1369 req->nbytes); in ahash_finup_no_ctx()
1388 return -ENOMEM; in ahash_finup_no_ctx()
1398 struct device *jrdev = ctx->jrdev; in ahash_update_first()
1399 u8 *buf = state->buf; in ahash_update_first()
1400 int *buflen = &state->buflen; in ahash_update_first()
1401 int *next_buflen = &state->next_buflen; in ahash_update_first()
1409 *next_buflen = req->nbytes & (blocksize - 1); in ahash_update_first()
1410 to_hash = req->nbytes - *next_buflen; in ahash_update_first()
1413 * For XCBC and CMAC, if to_hash is a multiple of the block size, in ahash_update_first()
1416 if ((is_xcbc_aes(ctx->adata.algtype) || in ahash_update_first()
1417 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && in ahash_update_first()
1420 to_hash -= blocksize; in ahash_update_first()
1424 src_nents = sg_nents_for_len(req->src, in ahash_update_first()
1425 req->nbytes - *next_buflen); in ahash_update_first()
1432 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, in ahash_update_first()
1436 return -ENOMEM; in ahash_update_first()
1448 ctx->sh_desc_update_first, in ahash_update_first()
1449 ctx->sh_desc_update_first_dma); in ahash_update_first()
1451 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); in ahash_update_first()
1452 return -ENOMEM; in ahash_update_first()
1455 edesc->src_nents = src_nents; in ahash_update_first()
1462 desc = edesc->hw_desc; in ahash_update_first()
1464 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); in ahash_update_first()
1473 ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_first()
1474 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) in ahash_update_first()
1476 state->update = ahash_update_ctx; in ahash_update_first()
1477 state->finup = ahash_finup_ctx; in ahash_update_first()
1478 state->final = ahash_final_ctx; in ahash_update_first()
1480 state->update = ahash_update_no_ctx; in ahash_update_first()
1481 state->finup = ahash_finup_no_ctx; in ahash_update_first()
1482 state->final = ahash_final_no_ctx; in ahash_update_first()
1483 scatterwalk_map_and_copy(buf, req->src, 0, in ahash_update_first()
1484 req->nbytes, 0); in ahash_update_first()
1494 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_first()
1508 state->update = ahash_update_first; in ahash_init()
1509 state->finup = ahash_finup_first; in ahash_init()
1510 state->final = ahash_final_no_ctx; in ahash_init()
1512 state->ctx_dma = 0; in ahash_init()
1513 state->ctx_dma_len = 0; in ahash_init()
1514 state->buf_dma = 0; in ahash_init()
1515 state->buflen = 0; in ahash_init()
1516 state->next_buflen = 0; in ahash_init()
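Summary of the handler state machine set up here, as visible in the transitions earlier in this file:

/*
 * ahash_init:   update = ahash_update_first, final = ahash_final_no_ctx
 * first data:   ahash_update_first promotes to the _ctx handlers once
 *               a job has written the running context; if everything
 *               still fits in the buffer it installs the _no_ctx
 *               handlers instead
 * after that:   ahash_update_no_ctx likewise promotes to the _ctx
 *               handlers when it issues its first job
 */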
1525 return state->update(req); in ahash_update()
1532 return state->finup(req); in ahash_finup()
1539 return state->final(req); in ahash_final()
1546 u8 *buf = state->buf; in ahash_export()
1547 int len = state->buflen; in ahash_export()
1549 memcpy(export->buf, buf, len); in ahash_export()
1550 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); in ahash_export()
1551 export->buflen = len; in ahash_export()
1552 export->update = state->update; in ahash_export()
1553 export->final = state->final; in ahash_export()
1554 export->finup = state->finup; in ahash_export()
1565 memcpy(state->buf, export->buf, export->buflen); in ahash_import()
1566 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); in ahash_import()
1567 state->buflen = export->buflen; in ahash_import()
1568 state->update = export->update; in ahash_import()
1569 state->final = export->final; in ahash_import()
1570 state->finup = export->finup; in ahash_import()
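A hedged caller-side sketch of the export/import pair above: a partial hash state can be snapshotted and resumed on another request. req and req2 are hypothetical ahash_request instances on the same algorithm; HASH_MAX_STATESIZE from <crypto/hash.h> bounds any ahash state size:

	char partial[HASH_MAX_STATESIZE];
	int ret;

	ret = crypto_ahash_export(req, partial);	  /* snapshot state */
	if (!ret)
		ret = crypto_ahash_import(req2, partial); /* resume elsewhere */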
1589 .driver_name = "sha1-caam",
1591 .hmac_driver_name = "hmac-sha1-caam",
1610 .driver_name = "sha224-caam",
1612 .hmac_driver_name = "hmac-sha224-caam",
1631 .driver_name = "sha256-caam",
1633 .hmac_driver_name = "hmac-sha256-caam",
1652 .driver_name = "sha384-caam",
1654 .hmac_driver_name = "hmac-sha384-caam",
1673 .driver_name = "sha512-caam",
1675 .hmac_driver_name = "hmac-sha512-caam",
1694 .driver_name = "md5-caam",
1696 .hmac_driver_name = "hmac-md5-caam",
1714 .hmac_name = "xcbc(aes)",
1715 .hmac_driver_name = "xcbc-aes-caam",
1733 .hmac_name = "cmac(aes)",
1734 .hmac_driver_name = "cmac-aes-caam",
1764 struct crypto_alg *base = tfm->__crt_alg; in caam_hash_cra_init()
1785 * Get a Job ring from Job Ring driver to ensure in-order in caam_hash_cra_init()
1788 ctx->jrdev = caam_jr_alloc(); in caam_hash_cra_init()
1789 if (IS_ERR(ctx->jrdev)) { in caam_hash_cra_init()
1791 return PTR_ERR(ctx->jrdev); in caam_hash_cra_init()
1794 priv = dev_get_drvdata(ctx->jrdev->parent); in caam_hash_cra_init()
1796 if (is_xcbc_aes(caam_hash->alg_type)) { in caam_hash_cra_init()
1797 ctx->dir = DMA_TO_DEVICE; in caam_hash_cra_init()
1798 ctx->key_dir = DMA_BIDIRECTIONAL; in caam_hash_cra_init()
1799 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; in caam_hash_cra_init()
1800 ctx->ctx_len = 48; in caam_hash_cra_init()
1801 } else if (is_cmac_aes(caam_hash->alg_type)) { in caam_hash_cra_init()
1802 ctx->dir = DMA_TO_DEVICE; in caam_hash_cra_init()
1803 ctx->key_dir = DMA_NONE; in caam_hash_cra_init()
1804 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; in caam_hash_cra_init()
1805 ctx->ctx_len = 32; in caam_hash_cra_init()
1807 if (priv->era >= 6) { in caam_hash_cra_init()
1808 ctx->dir = DMA_BIDIRECTIONAL; in caam_hash_cra_init()
1809 ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE; in caam_hash_cra_init()
1811 ctx->dir = DMA_TO_DEVICE; in caam_hash_cra_init()
1812 ctx->key_dir = DMA_NONE; in caam_hash_cra_init()
1814 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; in caam_hash_cra_init()
1815 ctx->ctx_len = runninglen[(ctx->adata.algtype & in caam_hash_cra_init()
1820 if (ctx->key_dir != DMA_NONE) { in caam_hash_cra_init()
1821 ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, in caam_hash_cra_init()
1822 ARRAY_SIZE(ctx->key), in caam_hash_cra_init()
1823 ctx->key_dir, in caam_hash_cra_init()
1825 if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { in caam_hash_cra_init()
1826 dev_err(ctx->jrdev, "unable to map key\n"); in caam_hash_cra_init()
1827 caam_jr_free(ctx->jrdev); in caam_hash_cra_init()
1828 return -ENOMEM; in caam_hash_cra_init()
1832 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, in caam_hash_cra_init()
1833 offsetof(struct caam_hash_ctx, key) - in caam_hash_cra_init()
1835 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); in caam_hash_cra_init()
1836 if (dma_mapping_error(ctx->jrdev, dma_addr)) { in caam_hash_cra_init()
1837 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); in caam_hash_cra_init()
1839 if (ctx->key_dir != DMA_NONE) in caam_hash_cra_init()
1840 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, in caam_hash_cra_init()
1841 ARRAY_SIZE(ctx->key), in caam_hash_cra_init()
1842 ctx->key_dir, in caam_hash_cra_init()
1845 caam_jr_free(ctx->jrdev); in caam_hash_cra_init()
1846 return -ENOMEM; in caam_hash_cra_init()
1849 ctx->sh_desc_update_dma = dma_addr; in caam_hash_cra_init()
1850 ctx->sh_desc_update_first_dma = dma_addr + in caam_hash_cra_init()
1852 sh_desc_update_first) - in caam_hash_cra_init()
1854 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, in caam_hash_cra_init()
1855 sh_desc_fin) - in caam_hash_cra_init()
1857 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, in caam_hash_cra_init()
1858 sh_desc_digest) - in caam_hash_cra_init()
1867 return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash); in caam_hash_cra_init()
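Mapping-layout sketch for caam_hash_cra_init(): the four shared descriptors sit contiguously at the top of struct caam_hash_ctx, so a single dma_map_single_attrs() covers all of them and the per-descriptor handles are pure offset arithmetic, e.g.:

/*
 * sh_desc_fin_dma = dma_addr
 *                 + offsetof(struct caam_hash_ctx, sh_desc_fin)
 *                 - offsetof(struct caam_hash_ctx, sh_desc_update);
 *
 * (sh_desc_update is the first member, hence its offset is subtracted.)
 */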
1874 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, in caam_hash_cra_exit()
1875 offsetof(struct caam_hash_ctx, key) - in caam_hash_cra_exit()
1877 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); in caam_hash_cra_exit()
1878 if (ctx->key_dir != DMA_NONE) in caam_hash_cra_exit()
1879 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, in caam_hash_cra_exit()
1880 ARRAY_SIZE(ctx->key), ctx->key_dir, in caam_hash_cra_exit()
1882 caam_jr_free(ctx->jrdev); in caam_hash_cra_exit()
1893 crypto_engine_unregister_ahash(&t_alg->ahash_alg); in caam_algapi_hash_exit()
1894 list_del(&t_alg->entry); in caam_algapi_hash_exit()
1909 return ERR_PTR(-ENOMEM); in caam_hash_alloc()
1911 t_alg->ahash_alg.base = template->template_ahash; in caam_hash_alloc()
1912 halg = &t_alg->ahash_alg.base; in caam_hash_alloc()
1913 alg = &halg->halg.base; in caam_hash_alloc()
1916 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
1917 template->hmac_name); in caam_hash_alloc()
1918 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
1919 template->hmac_driver_name); in caam_hash_alloc()
1920 t_alg->is_hmac = true; in caam_hash_alloc()
1922 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
1923 template->name); in caam_hash_alloc()
1924 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
1925 template->driver_name); in caam_hash_alloc()
1926 halg->setkey = NULL; in caam_hash_alloc()
1927 t_alg->is_hmac = false; in caam_hash_alloc()
1929 alg->cra_module = THIS_MODULE; in caam_hash_alloc()
1930 alg->cra_init = caam_hash_cra_init; in caam_hash_alloc()
1931 alg->cra_exit = caam_hash_cra_exit; in caam_hash_alloc()
1932 alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); in caam_hash_alloc()
1933 alg->cra_priority = CAAM_CRA_PRIORITY; in caam_hash_alloc()
1934 alg->cra_blocksize = template->blocksize; in caam_hash_alloc()
1935 alg->cra_alignmask = 0; in caam_hash_alloc()
1936 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; in caam_hash_alloc()
1938 t_alg->alg_type = template->alg_type; in caam_hash_alloc()
1939 t_alg->ahash_alg.op.do_one_request = ahash_do_one_req; in caam_hash_alloc()
1955 if (priv->era < 10) { in caam_algapi_hash_init()
1956 struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon; in caam_algapi_hash_init()
1958 md_vid = (rd_reg32(&perfmon->cha_id_ls) & in caam_algapi_hash_init()
1960 md_inst = (rd_reg32(&perfmon->cha_num_ls) & in caam_algapi_hash_init()
1963 u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha); in caam_algapi_hash_init()
1988 if (is_mdha(alg->alg_type) && in caam_algapi_hash_init()
1989 alg->template_ahash.halg.digestsize > md_limit) in caam_algapi_hash_init()
1997 alg->hmac_driver_name); in caam_algapi_hash_init()
2001 err = crypto_engine_register_ahash(&t_alg->ahash_alg); in caam_algapi_hash_init()
2004 t_alg->ahash_alg.base.halg.base.cra_driver_name, in caam_algapi_hash_init()
2008 list_add_tail(&t_alg->entry, &hash_list); in caam_algapi_hash_init()
2010 if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) in caam_algapi_hash_init()
2017 pr_warn("%s alg allocation failed\n", alg->driver_name); in caam_algapi_hash_init()
2021 err = crypto_engine_register_ahash(&t_alg->ahash_alg); in caam_algapi_hash_init()
2024 t_alg->ahash_alg.base.halg.base.cra_driver_name, in caam_algapi_hash_init()
2028 list_add_tail(&t_alg->entry, &hash_list); in caam_algapi_hash_init()
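Capability-gating summary for the registration loop above, hedged and reconstructed from the visible fragments: era < 10 parts report the MDHA version and instance count through the global perfmon CHA registers, while era >= 10 parts expose the same data per job ring in vreg.mdha. Hash algorithms whose digest size exceeds the detected MDHA limit are skipped, as are the AES-based xcbc/cmac templates when no AES engine is instantiated.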