Lines Matching +full:pk +full:- +full:pk
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
8 #include <linux/dma-mapping.h>
46 * cc_copy_mac() - Copy MAC to temporary location
56 u32 skip = req->assoclen + req->cryptlen; in cc_copy_mac()
58 cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src, in cc_copy_mac()
59 (skip - areq_ctx->req_authsize), skip, dir); in cc_copy_mac()
63 * cc_get_sgl_nents() - Get scatterlist number of entries.
85 nbytes -= (sg_list->length > nbytes) ? in cc_get_sgl_nents()
86 nbytes : sg_list->length; in cc_get_sgl_nents()
95 * cc_copy_sg_portion() - Copy scatter list data,
112 sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip, in cc_copy_sg_portion()
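
cc_copy_sg_portion() is a thin wrapper around the scatterlist library's sg_copy_buffer(). A minimal, hedged sketch of that pattern (not the driver's exact code; parameter names are illustrative):

#include <linux/scatterlist.h>

/* Copy the byte range [to_skip, end] of an sg list into buf, or from buf
 * back into the list when to_buffer is false. */
static void copy_sg_range(u8 *buf, struct scatterlist *sg,
			  u32 to_skip, u32 end, bool to_buffer)
{
	int nents = sg_nents_for_len(sg, end);

	if (nents < 0)
		return;
	/* sg_copy_buffer() skips to_skip bytes, then moves end - to_skip + 1 */
	sg_copy_buffer(sg, nents, buf, end - to_skip + 1, to_skip, to_buffer);
}

cc_copy_mac() above builds on the same call to stash the MAC bytes that end at assoclen + cryptlen and start req_authsize bytes earlier in the source list.
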
128 return -ENOMEM; in cc_render_buff_to_mlli()
139 buff_size -= CC_MAX_MLLI_ENTRY_SIZE; in cc_render_buff_to_mlli()
166 (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ? in cc_render_sg_to_mlli()
167 sg_dma_len(curr_sgl) - sgl_offset : in cc_render_sg_to_mlli()
169 sgl_data_len -= entry_data_len; in cc_render_sg_to_mlli()
189 dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers); in cc_generate_mlli()
192 mlli_params->mlli_virt_addr = in cc_generate_mlli()
193 dma_pool_alloc(mlli_params->curr_pool, flags, in cc_generate_mlli()
194 &mlli_params->mlli_dma_addr); in cc_generate_mlli()
195 if (!mlli_params->mlli_virt_addr) { in cc_generate_mlli()
197 rc = -ENOMEM; in cc_generate_mlli()
201 mlli_p = mlli_params->mlli_virt_addr; in cc_generate_mlli()
203 for (i = 0; i < sg_data->num_of_buffers; i++) { in cc_generate_mlli()
204 union buffer_array_entry *entry = &sg_data->entry[i]; in cc_generate_mlli()
205 u32 tot_len = sg_data->total_data_len[i]; in cc_generate_mlli()
206 u32 offset = sg_data->offset[i]; in cc_generate_mlli()
208 rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset, in cc_generate_mlli()
214 if (sg_data->mlli_nents[i]) { in cc_generate_mlli()
218 *sg_data->mlli_nents[i] += in cc_generate_mlli()
219 (total_nents - prev_total_nents); in cc_generate_mlli()
225 mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE); in cc_generate_mlli()
227 dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n", in cc_generate_mlli()
228 mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr, in cc_generate_mlli()
229 mlli_params->mlli_len); in cc_generate_mlli()
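
cc_generate_mlli() draws one MLLI table from a DMA pool, then renders every buffer-array entry into it and records the total length. A hedged sketch of the allocation step; struct mlli_params and its field names are taken from the fragments above and live in the driver's own headers:

#include <linux/device.h>
#include <linux/dmapool.h>

/* Sketch: grab one MLLI table from the pool and remember its bus address. */
static int alloc_mlli_table(struct device *dev, struct dma_pool *pool,
			    gfp_t flags, struct mlli_params *p)
{
	p->curr_pool = pool;
	p->mlli_virt_addr = dma_pool_alloc(pool, flags, &p->mlli_dma_addr);
	if (!p->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed for MLLI table\n");
		return -ENOMEM;
	}
	return 0;
}

The matching dma_pool_free() calls appear in the unmap paths below once a request completes.
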
240 unsigned int index = sgl_data->num_of_buffers; in cc_add_sg_entry()
242 dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n", in cc_add_sg_entry()
244 sgl_data->nents[index] = nents; in cc_add_sg_entry()
245 sgl_data->entry[index].sgl = sgl; in cc_add_sg_entry()
246 sgl_data->offset[index] = data_offset; in cc_add_sg_entry()
247 sgl_data->total_data_len[index] = data_len; in cc_add_sg_entry()
248 sgl_data->is_last[index] = is_last_table; in cc_add_sg_entry()
249 sgl_data->mlli_nents[index] = mlli_nents; in cc_add_sg_entry()
250 if (sgl_data->mlli_nents[index]) in cc_add_sg_entry()
251 *sgl_data->mlli_nents[index] = 0; in cc_add_sg_entry()
252 sgl_data->num_of_buffers++; in cc_add_sg_entry()
273 return -ENOMEM; in cc_map_sg()
280 return -ENOMEM; in cc_map_sg()
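
cc_map_sg() (only its error returns match the search) wraps the usual dma_map_sg() pattern: a zero return means the mapping failed, and a segment count larger than one MLLI table can describe is also treated as -ENOMEM. Sketched under the assumption of a to-device mapping:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: map an sg list and bound the number of DMA segments. */
static int map_sg_checked(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, unsigned int max_entries,
			  u32 *mapped_nents)
{
	int ret = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	if (!ret)
		return -ENOMEM;		/* mapping failed */
	if (ret > max_entries) {	/* would overflow the MLLI table */
		dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	*mapped_nents = ret;
	return 0;
}
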
295 sg_init_one(&areq_ctx->ccm_adata_sg, config_data, in cc_set_aead_conf_buf()
296 AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size); in cc_set_aead_conf_buf()
297 if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) { in cc_set_aead_conf_buf()
299 return -ENOMEM; in cc_set_aead_conf_buf()
301 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", in cc_set_aead_conf_buf()
302 &sg_dma_address(&areq_ctx->ccm_adata_sg), in cc_set_aead_conf_buf()
303 sg_page(&areq_ctx->ccm_adata_sg), in cc_set_aead_conf_buf()
304 sg_virt(&areq_ctx->ccm_adata_sg), in cc_set_aead_conf_buf()
305 areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length); in cc_set_aead_conf_buf()
308 cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg, in cc_set_aead_conf_buf()
309 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size), in cc_set_aead_conf_buf()
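
cc_set_aead_conf_buf() presents the CCM configuration block (AES_BLOCK_SIZE plus the encoded header length) to the engine as a one-entry scatterlist. The underlying sg_init_one()/dma_map_sg() idiom, as a sketch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: wrap a (DMA-able, non-stack) kernel buffer in a single sg entry. */
static int map_single_buf(struct device *dev, struct scatterlist *sg,
			  void *buf, unsigned int len)
{
	sg_init_one(sg, buf, len);
	if (dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) != 1)
		return -ENOMEM;
	return 0;
}

cc_set_hash_buf() below uses the same idiom for the hash path's staging buffer.
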
321 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt); in cc_set_hash_buf()
322 if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) { in cc_set_hash_buf()
324 return -ENOMEM; in cc_set_hash_buf()
326 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", in cc_set_hash_buf()
327 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg), in cc_set_hash_buf()
328 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset, in cc_set_hash_buf()
329 areq_ctx->buff_sg->length); in cc_set_hash_buf()
330 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; in cc_set_hash_buf()
331 areq_ctx->curr_sg = areq_ctx->buff_sg; in cc_set_hash_buf()
332 areq_ctx->in_nents = 0; in cc_set_hash_buf()
334 cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0, in cc_set_hash_buf()
345 if (req_ctx->gen_ctx.iv_dma_addr) { in cc_unmap_cipher_request()
347 &req_ctx->gen_ctx.iv_dma_addr, ivsize); in cc_unmap_cipher_request()
348 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr, in cc_unmap_cipher_request()
352 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI && in cc_unmap_cipher_request()
353 req_ctx->mlli_params.mlli_virt_addr) { in cc_unmap_cipher_request()
354 dma_pool_free(req_ctx->mlli_params.curr_pool, in cc_unmap_cipher_request()
355 req_ctx->mlli_params.mlli_virt_addr, in cc_unmap_cipher_request()
356 req_ctx->mlli_params.mlli_dma_addr); in cc_unmap_cipher_request()
360 dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); in cc_unmap_cipher_request()
361 dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); in cc_unmap_cipher_request()
362 dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst)); in cc_unmap_cipher_request()
363 dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); in cc_unmap_cipher_request()
365 dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); in cc_unmap_cipher_request()
366 dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); in cc_unmap_cipher_request()
376 struct mlli_params *mlli_params = &req_ctx->mlli_params; in cc_map_cipher_request()
384 req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; in cc_map_cipher_request()
385 mlli_params->curr_pool = NULL; in cc_map_cipher_request()
391 req_ctx->gen_ctx.iv_dma_addr = in cc_map_cipher_request()
393 if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) { in cc_map_cipher_request()
394 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", in cc_map_cipher_request()
396 return -ENOMEM; in cc_map_cipher_request()
398 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", in cc_map_cipher_request()
399 ivsize, info, &req_ctx->gen_ctx.iv_dma_addr); in cc_map_cipher_request()
401 req_ctx->gen_ctx.iv_dma_addr = 0; in cc_map_cipher_request()
405 rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents, in cc_map_cipher_request()
410 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; in cc_map_cipher_request()
414 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { in cc_map_cipher_request()
415 req_ctx->out_nents = 0; in cc_map_cipher_request()
416 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src, in cc_map_cipher_request()
418 &req_ctx->in_mlli_nents); in cc_map_cipher_request()
423 &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, in cc_map_cipher_request()
428 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; in cc_map_cipher_request()
430 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { in cc_map_cipher_request()
431 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src, in cc_map_cipher_request()
433 &req_ctx->in_mlli_nents); in cc_map_cipher_request()
434 cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst, in cc_map_cipher_request()
436 &req_ctx->out_mlli_nents); in cc_map_cipher_request()
440 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { in cc_map_cipher_request()
441 mlli_params->curr_pool = drvdata->mlli_buffs_pool; in cc_map_cipher_request()
447 dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n", in cc_map_cipher_request()
448 cc_dma_buf_type(req_ctx->dma_buf_type)); in cc_map_cipher_request()
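
cc_map_cipher_request() starts every request in DLLI (direct) mode and only switches to MLLI when src or dst maps to more than one DMA segment, at which point both sides are added to the buffer array and a table is generated. A compact sketch of that decision; the enum is redeclared here only for illustration (the driver defines it in its own header):

enum cc_dma_buf_type { CC_DMA_BUF_NULL, CC_DMA_BUF_DLLI, CC_DMA_BUF_MLLI };

/* Sketch: DLLI needs a single contiguous DMA segment on each side. */
static enum cc_dma_buf_type pick_buf_type(u32 src_mapped_nents,
					  u32 dst_mapped_nents)
{
	if (src_mapped_nents > 1 || dst_mapped_nents > 1)
		return CC_DMA_BUF_MLLI;
	return CC_DMA_BUF_DLLI;
}
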
460 unsigned int hw_iv_size = areq_ctx->hw_iv_size; in cc_unmap_aead_request()
462 int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); in cc_unmap_aead_request()
464 if (areq_ctx->mac_buf_dma_addr) { in cc_unmap_aead_request()
465 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, in cc_unmap_aead_request()
469 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { in cc_unmap_aead_request()
470 if (areq_ctx->hkey_dma_addr) { in cc_unmap_aead_request()
471 dma_unmap_single(dev, areq_ctx->hkey_dma_addr, in cc_unmap_aead_request()
475 if (areq_ctx->gcm_block_len_dma_addr) { in cc_unmap_aead_request()
476 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr, in cc_unmap_aead_request()
480 if (areq_ctx->gcm_iv_inc1_dma_addr) { in cc_unmap_aead_request()
481 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, in cc_unmap_aead_request()
485 if (areq_ctx->gcm_iv_inc2_dma_addr) { in cc_unmap_aead_request()
486 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, in cc_unmap_aead_request()
491 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { in cc_unmap_aead_request()
492 if (areq_ctx->ccm_iv0_dma_addr) { in cc_unmap_aead_request()
493 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, in cc_unmap_aead_request()
497 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE); in cc_unmap_aead_request()
499 if (areq_ctx->gen_ctx.iv_dma_addr) { in cc_unmap_aead_request()
500 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, in cc_unmap_aead_request()
502 kfree_sensitive(areq_ctx->gen_ctx.iv); in cc_unmap_aead_request()
506 if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || in cc_unmap_aead_request()
507 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && in cc_unmap_aead_request()
508 (areq_ctx->mlli_params.mlli_virt_addr)) { in cc_unmap_aead_request()
509 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", in cc_unmap_aead_request()
510 &areq_ctx->mlli_params.mlli_dma_addr, in cc_unmap_aead_request()
511 areq_ctx->mlli_params.mlli_virt_addr); in cc_unmap_aead_request()
512 dma_pool_free(areq_ctx->mlli_params.curr_pool, in cc_unmap_aead_request()
513 areq_ctx->mlli_params.mlli_virt_addr, in cc_unmap_aead_request()
514 areq_ctx->mlli_params.mlli_dma_addr); in cc_unmap_aead_request()
517 dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", in cc_unmap_aead_request()
518 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, in cc_unmap_aead_request()
519 areq_ctx->assoclen, req->cryptlen); in cc_unmap_aead_request()
521 dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); in cc_unmap_aead_request()
522 if (req->src != req->dst) { in cc_unmap_aead_request()
523 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", in cc_unmap_aead_request()
524 sg_virt(req->dst)); in cc_unmap_aead_request()
525 dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); in cc_unmap_aead_request()
527 if (drvdata->coherent && in cc_unmap_aead_request()
528 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && in cc_unmap_aead_request()
529 req->src == req->dst) { in cc_unmap_aead_request()
550 unsigned int hw_iv_size = areq_ctx->hw_iv_size; in cc_aead_chain_iv()
552 gfp_t flags = cc_gfp_flags(&req->base); in cc_aead_chain_iv()
555 if (!req->iv) { in cc_aead_chain_iv()
556 areq_ctx->gen_ctx.iv_dma_addr = 0; in cc_aead_chain_iv()
557 areq_ctx->gen_ctx.iv = NULL; in cc_aead_chain_iv()
561 areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags); in cc_aead_chain_iv()
562 if (!areq_ctx->gen_ctx.iv) in cc_aead_chain_iv()
563 return -ENOMEM; in cc_aead_chain_iv()
565 areq_ctx->gen_ctx.iv_dma_addr = in cc_aead_chain_iv()
566 dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size, in cc_aead_chain_iv()
568 if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { in cc_aead_chain_iv()
569 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", in cc_aead_chain_iv()
570 hw_iv_size, req->iv); in cc_aead_chain_iv()
571 kfree_sensitive(areq_ctx->gen_ctx.iv); in cc_aead_chain_iv()
572 areq_ctx->gen_ctx.iv = NULL; in cc_aead_chain_iv()
573 rc = -ENOMEM; in cc_aead_chain_iv()
577 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", in cc_aead_chain_iv()
578 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr); in cc_aead_chain_iv()
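
cc_aead_chain_iv() clones req->iv into a kmalloc'd buffer before mapping it, since the caller's IV may not be DMA-safe, and releases the clone with kfree_sensitive() so the material is zeroized. A hedged sketch of that pattern, assuming a to-device mapping and using 0 as the "no IV" sentinel the driver also relies on:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch: duplicate the IV into DMA-safe memory and map it. */
static dma_addr_t map_iv_copy(struct device *dev, const u8 *iv,
			      unsigned int ivsize, gfp_t flags, u8 **iv_copy)
{
	dma_addr_t dma;

	*iv_copy = kmemdup(iv, ivsize, flags);
	if (!*iv_copy)
		return 0;

	dma = dma_map_single(dev, *iv_copy, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree_sensitive(*iv_copy);	/* zeroizes before freeing */
		*iv_copy = NULL;
		return 0;
	}
	return dma;
}
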
595 rc = -EINVAL; in cc_aead_chain_assoc()
599 if (areq_ctx->assoclen == 0) { in cc_aead_chain_assoc()
600 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; in cc_aead_chain_assoc()
601 areq_ctx->assoc.nents = 0; in cc_aead_chain_assoc()
602 areq_ctx->assoc.mlli_nents = 0; in cc_aead_chain_assoc()
604 cc_dma_buf_type(areq_ctx->assoc_buff_type), in cc_aead_chain_assoc()
605 areq_ctx->assoc.nents); in cc_aead_chain_assoc()
609 mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen); in cc_aead_chain_assoc()
616 return -ENOMEM; in cc_aead_chain_assoc()
618 areq_ctx->assoc.nents = mapped_nents; in cc_aead_chain_assoc()
623 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { in cc_aead_chain_assoc()
626 (areq_ctx->assoc.nents + 1), in cc_aead_chain_assoc()
628 rc = -ENOMEM; in cc_aead_chain_assoc()
633 if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null) in cc_aead_chain_assoc()
634 areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI; in cc_aead_chain_assoc()
636 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; in cc_aead_chain_assoc()
638 if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { in cc_aead_chain_assoc()
640 cc_dma_buf_type(areq_ctx->assoc_buff_type), in cc_aead_chain_assoc()
641 areq_ctx->assoc.nents); in cc_aead_chain_assoc()
642 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src, in cc_aead_chain_assoc()
643 areq_ctx->assoclen, 0, is_last, in cc_aead_chain_assoc()
644 &areq_ctx->assoc.mlli_nents); in cc_aead_chain_assoc()
645 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; in cc_aead_chain_assoc()
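
The associated-data chain only needs the entries covering the first assoclen bytes of req->src; a single mapped entry keeps it DLLI unless a CCM header or the do_chain flag forces MLLI. sg_nents_for_len(), used above, reports that count and returns a negative errno when the list is shorter than requested:

#include <linux/device.h>
#include <linux/scatterlist.h>

/* Sketch: count the sg entries spanning the associated data. */
static int assoc_sg_entries(struct device *dev, struct scatterlist *src,
			    unsigned int assoclen)
{
	int nents = sg_nents_for_len(src, assoclen);

	if (nents < 0)
		dev_err(dev, "Too few sg entries for assoclen=%u\n", assoclen);
	return nents;
}
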
656 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; in cc_prepare_aead_data_dlli()
657 unsigned int authsize = areq_ctx->req_authsize; in cc_prepare_aead_data_dlli()
661 areq_ctx->is_icv_fragmented = false; in cc_prepare_aead_data_dlli()
663 if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) { in cc_prepare_aead_data_dlli()
664 sg = areq_ctx->src_sgl; in cc_prepare_aead_data_dlli()
665 offset = *src_last_bytes - authsize; in cc_prepare_aead_data_dlli()
667 sg = areq_ctx->dst_sgl; in cc_prepare_aead_data_dlli()
668 offset = *dst_last_bytes - authsize; in cc_prepare_aead_data_dlli()
671 areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset; in cc_prepare_aead_data_dlli()
672 areq_ctx->icv_virt_addr = sg_virt(sg) + offset; in cc_prepare_aead_data_dlli()
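
In the DLLI case the ICV (authentication tag) is contiguous with the data, so its addresses fall at the chosen side's last-bytes count minus authsize within the final sg entry. As a tiny helper (names illustrative):

#include <linux/scatterlist.h>

/* Sketch: the tag occupies the tail of the last data sg entry. */
static void locate_icv(struct scatterlist *sg, u32 last_bytes,
		       unsigned int authsize, dma_addr_t *icv_dma,
		       void **icv_virt)
{
	u32 offset = last_bytes - authsize;

	*icv_dma = sg_dma_address(sg) + offset;
	*icv_virt = sg_virt(sg) + offset;
}
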
682 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; in cc_prepare_aead_data_mlli()
683 unsigned int authsize = areq_ctx->req_authsize; in cc_prepare_aead_data_mlli()
687 if (req->src == req->dst) { in cc_prepare_aead_data_mlli()
689 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, in cc_prepare_aead_data_mlli()
690 areq_ctx->src_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
691 areq_ctx->src_offset, is_last_table, in cc_prepare_aead_data_mlli()
692 &areq_ctx->src.mlli_nents); in cc_prepare_aead_data_mlli()
694 areq_ctx->is_icv_fragmented = in cc_prepare_aead_data_mlli()
695 cc_is_icv_frag(areq_ctx->src.nents, authsize, in cc_prepare_aead_data_mlli()
698 if (areq_ctx->is_icv_fragmented) { in cc_prepare_aead_data_mlli()
706 * INPLACE-DECRYPT operation, hence in cc_prepare_aead_data_mlli()
709 if (!drvdata->coherent) in cc_prepare_aead_data_mlli()
712 areq_ctx->icv_virt_addr = areq_ctx->backup_mac; in cc_prepare_aead_data_mlli()
714 areq_ctx->icv_virt_addr = areq_ctx->mac_buf; in cc_prepare_aead_data_mlli()
715 areq_ctx->icv_dma_addr = in cc_prepare_aead_data_mlli()
716 areq_ctx->mac_buf_dma_addr; in cc_prepare_aead_data_mlli()
719 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; in cc_prepare_aead_data_mlli()
721 areq_ctx->icv_dma_addr = sg_dma_address(sg) + in cc_prepare_aead_data_mlli()
722 (*src_last_bytes - authsize); in cc_prepare_aead_data_mlli()
723 areq_ctx->icv_virt_addr = sg_virt(sg) + in cc_prepare_aead_data_mlli()
724 (*src_last_bytes - authsize); in cc_prepare_aead_data_mlli()
728 /*NON-INPLACE and DECRYPT*/ in cc_prepare_aead_data_mlli()
729 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, in cc_prepare_aead_data_mlli()
730 areq_ctx->src_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
731 areq_ctx->src_offset, is_last_table, in cc_prepare_aead_data_mlli()
732 &areq_ctx->src.mlli_nents); in cc_prepare_aead_data_mlli()
733 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, in cc_prepare_aead_data_mlli()
734 areq_ctx->dst_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
735 areq_ctx->dst_offset, is_last_table, in cc_prepare_aead_data_mlli()
736 &areq_ctx->dst.mlli_nents); in cc_prepare_aead_data_mlli()
738 areq_ctx->is_icv_fragmented = in cc_prepare_aead_data_mlli()
739 cc_is_icv_frag(areq_ctx->src.nents, authsize, in cc_prepare_aead_data_mlli()
746 if (areq_ctx->is_icv_fragmented) { in cc_prepare_aead_data_mlli()
748 areq_ctx->icv_virt_addr = areq_ctx->backup_mac; in cc_prepare_aead_data_mlli()
751 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; in cc_prepare_aead_data_mlli()
753 areq_ctx->icv_dma_addr = sg_dma_address(sg) + in cc_prepare_aead_data_mlli()
754 (*src_last_bytes - authsize); in cc_prepare_aead_data_mlli()
755 areq_ctx->icv_virt_addr = sg_virt(sg) + in cc_prepare_aead_data_mlli()
756 (*src_last_bytes - authsize); in cc_prepare_aead_data_mlli()
760 /*NON-INPLACE and ENCRYPT*/ in cc_prepare_aead_data_mlli()
761 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, in cc_prepare_aead_data_mlli()
762 areq_ctx->dst_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
763 areq_ctx->dst_offset, is_last_table, in cc_prepare_aead_data_mlli()
764 &areq_ctx->dst.mlli_nents); in cc_prepare_aead_data_mlli()
765 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, in cc_prepare_aead_data_mlli()
766 areq_ctx->src_sgl, areq_ctx->cryptlen, in cc_prepare_aead_data_mlli()
767 areq_ctx->src_offset, is_last_table, in cc_prepare_aead_data_mlli()
768 &areq_ctx->src.mlli_nents); in cc_prepare_aead_data_mlli()
770 areq_ctx->is_icv_fragmented = in cc_prepare_aead_data_mlli()
771 cc_is_icv_frag(areq_ctx->dst.nents, authsize, in cc_prepare_aead_data_mlli()
774 if (!areq_ctx->is_icv_fragmented) { in cc_prepare_aead_data_mlli()
775 sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]; in cc_prepare_aead_data_mlli()
777 areq_ctx->icv_dma_addr = sg_dma_address(sg) + in cc_prepare_aead_data_mlli()
778 (*dst_last_bytes - authsize); in cc_prepare_aead_data_mlli()
779 areq_ctx->icv_virt_addr = sg_virt(sg) + in cc_prepare_aead_data_mlli()
780 (*dst_last_bytes - authsize); in cc_prepare_aead_data_mlli()
782 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; in cc_prepare_aead_data_mlli()
783 areq_ctx->icv_virt_addr = areq_ctx->mac_buf; in cc_prepare_aead_data_mlli()
795 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; in cc_aead_chain_data()
796 unsigned int authsize = areq_ctx->req_authsize; in cc_aead_chain_data()
801 /* non-inplace mode */ in cc_aead_chain_data()
802 unsigned int size_for_map = req->assoclen + req->cryptlen; in cc_aead_chain_data()
804 u32 size_to_skip = req->assoclen; in cc_aead_chain_data()
810 return -EINVAL; in cc_aead_chain_data()
812 areq_ctx->src_sgl = req->src; in cc_aead_chain_data()
813 areq_ctx->dst_sgl = req->dst; in cc_aead_chain_data()
817 src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, in cc_aead_chain_data()
819 sg_index = areq_ctx->src_sgl->length; in cc_aead_chain_data()
822 src_mapped_nents--; in cc_aead_chain_data()
823 offset -= areq_ctx->src_sgl->length; in cc_aead_chain_data()
824 sgl = sg_next(areq_ctx->src_sgl); in cc_aead_chain_data()
827 areq_ctx->src_sgl = sgl; in cc_aead_chain_data()
828 sg_index += areq_ctx->src_sgl->length; in cc_aead_chain_data()
833 return -ENOMEM; in cc_aead_chain_data()
836 areq_ctx->src.nents = src_mapped_nents; in cc_aead_chain_data()
838 areq_ctx->src_offset = offset; in cc_aead_chain_data()
840 if (req->src != req->dst) { in cc_aead_chain_data()
841 size_for_map = req->assoclen + req->cryptlen; in cc_aead_chain_data()
846 size_for_map -= authsize; in cc_aead_chain_data()
848 rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE, in cc_aead_chain_data()
849 &areq_ctx->dst.mapped_nents, in cc_aead_chain_data()
856 dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, in cc_aead_chain_data()
858 sg_index = areq_ctx->dst_sgl->length; in cc_aead_chain_data()
863 dst_mapped_nents--; in cc_aead_chain_data()
864 offset -= areq_ctx->dst_sgl->length; in cc_aead_chain_data()
865 sgl = sg_next(areq_ctx->dst_sgl); in cc_aead_chain_data()
868 areq_ctx->dst_sgl = sgl; in cc_aead_chain_data()
869 sg_index += areq_ctx->dst_sgl->length; in cc_aead_chain_data()
874 return -ENOMEM; in cc_aead_chain_data()
876 areq_ctx->dst.nents = dst_mapped_nents; in cc_aead_chain_data()
877 areq_ctx->dst_offset = offset; in cc_aead_chain_data()
881 areq_ctx->data_buff_type = CC_DMA_BUF_MLLI; in cc_aead_chain_data()
886 areq_ctx->data_buff_type = CC_DMA_BUF_DLLI; in cc_aead_chain_data()
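
cc_aead_chain_data() positions src_sgl/dst_sgl past the associated data by walking entries until the remaining skip fits inside one of them; that entry plus the leftover offset become the start of the data chain. A simplified sketch of the walk (the driver also tracks the mapped-entry count, omitted here):

#include <linux/scatterlist.h>

/* Sketch: advance an sg list past 'skip' bytes; return the entry holding
 * the first data byte and the offset into it. */
static struct scatterlist *skip_sg_bytes(struct scatterlist *sgl, u32 skip,
					 u32 *offset_out)
{
	u32 offset = skip;

	while (sgl && offset >= sgl->length) {
		offset -= sgl->length;
		sgl = sg_next(sgl);
	}
	*offset_out = offset;
	return sgl;
}
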
901 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { in cc_update_aead_mlli_nents()
902 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr; in cc_update_aead_mlli_nents()
903 curr_mlli_size = areq_ctx->assoc.mlli_nents * in cc_update_aead_mlli_nents()
907 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { in cc_update_aead_mlli_nents()
909 if (req->src == req->dst) { in cc_update_aead_mlli_nents()
910 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents; in cc_update_aead_mlli_nents()
911 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + in cc_update_aead_mlli_nents()
913 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr; in cc_update_aead_mlli_nents()
914 if (!areq_ctx->is_single_pass) in cc_update_aead_mlli_nents()
915 areq_ctx->assoc.mlli_nents += in cc_update_aead_mlli_nents()
916 areq_ctx->src.mlli_nents; in cc_update_aead_mlli_nents()
918 if (areq_ctx->gen_ctx.op_type == in cc_update_aead_mlli_nents()
920 areq_ctx->src.sram_addr = in cc_update_aead_mlli_nents()
921 drvdata->mlli_sram_addr + in cc_update_aead_mlli_nents()
923 areq_ctx->dst.sram_addr = in cc_update_aead_mlli_nents()
924 areq_ctx->src.sram_addr + in cc_update_aead_mlli_nents()
925 areq_ctx->src.mlli_nents * in cc_update_aead_mlli_nents()
927 if (!areq_ctx->is_single_pass) in cc_update_aead_mlli_nents()
928 areq_ctx->assoc.mlli_nents += in cc_update_aead_mlli_nents()
929 areq_ctx->src.mlli_nents; in cc_update_aead_mlli_nents()
931 areq_ctx->dst.sram_addr = in cc_update_aead_mlli_nents()
932 drvdata->mlli_sram_addr + in cc_update_aead_mlli_nents()
934 areq_ctx->src.sram_addr = in cc_update_aead_mlli_nents()
935 areq_ctx->dst.sram_addr + in cc_update_aead_mlli_nents()
936 areq_ctx->dst.mlli_nents * in cc_update_aead_mlli_nents()
938 if (!areq_ctx->is_single_pass) in cc_update_aead_mlli_nents()
939 areq_ctx->assoc.mlli_nents += in cc_update_aead_mlli_nents()
940 areq_ctx->dst.mlli_nents; in cc_update_aead_mlli_nents()
949 struct mlli_params *mlli_params = &areq_ctx->mlli_params; in cc_map_aead_request()
952 unsigned int authsize = areq_ctx->req_authsize; in cc_map_aead_request()
958 gfp_t flags = cc_gfp_flags(&req->base); in cc_map_aead_request()
960 mlli_params->curr_pool = NULL; in cc_map_aead_request()
966 if (drvdata->coherent && in cc_map_aead_request()
967 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && in cc_map_aead_request()
968 req->src == req->dst) in cc_map_aead_request()
972 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type == in cc_map_aead_request()
974 req->cryptlen : in cc_map_aead_request()
975 (req->cryptlen - authsize); in cc_map_aead_request()
977 dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE, in cc_map_aead_request()
980 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", in cc_map_aead_request()
981 MAX_MAC_SIZE, areq_ctx->mac_buf); in cc_map_aead_request()
982 rc = -ENOMEM; in cc_map_aead_request()
985 areq_ctx->mac_buf_dma_addr = dma_addr; in cc_map_aead_request()
987 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { in cc_map_aead_request()
988 void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; in cc_map_aead_request()
994 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", in cc_map_aead_request()
996 areq_ctx->ccm_iv0_dma_addr = 0; in cc_map_aead_request()
997 rc = -ENOMEM; in cc_map_aead_request()
1000 areq_ctx->ccm_iv0_dma_addr = dma_addr; in cc_map_aead_request()
1002 rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, in cc_map_aead_request()
1003 &sg_data, areq_ctx->assoclen); in cc_map_aead_request()
1008 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { in cc_map_aead_request()
1009 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE, in cc_map_aead_request()
1012 dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n", in cc_map_aead_request()
1013 AES_BLOCK_SIZE, areq_ctx->hkey); in cc_map_aead_request()
1014 rc = -ENOMEM; in cc_map_aead_request()
1017 areq_ctx->hkey_dma_addr = dma_addr; in cc_map_aead_request()
1019 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block, in cc_map_aead_request()
1022 dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n", in cc_map_aead_request()
1023 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); in cc_map_aead_request()
1024 rc = -ENOMEM; in cc_map_aead_request()
1027 areq_ctx->gcm_block_len_dma_addr = dma_addr; in cc_map_aead_request()
1029 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1, in cc_map_aead_request()
1033 dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n", in cc_map_aead_request()
1034 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1)); in cc_map_aead_request()
1035 areq_ctx->gcm_iv_inc1_dma_addr = 0; in cc_map_aead_request()
1036 rc = -ENOMEM; in cc_map_aead_request()
1039 areq_ctx->gcm_iv_inc1_dma_addr = dma_addr; in cc_map_aead_request()
1041 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2, in cc_map_aead_request()
1045 dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n", in cc_map_aead_request()
1046 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2)); in cc_map_aead_request()
1047 areq_ctx->gcm_iv_inc2_dma_addr = 0; in cc_map_aead_request()
1048 rc = -ENOMEM; in cc_map_aead_request()
1051 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; in cc_map_aead_request()
1054 size_to_map = req->cryptlen + req->assoclen; in cc_map_aead_request()
1055 /* If we do in-place encryption, we also need the auth tag */ in cc_map_aead_request()
1056 if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && in cc_map_aead_request()
1057 (req->src == req->dst)) { in cc_map_aead_request()
1061 rc = cc_map_sg(dev, req->src, size_to_map, in cc_map_aead_request()
1062 (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL), in cc_map_aead_request()
1063 &areq_ctx->src.mapped_nents, in cc_map_aead_request()
1070 if (areq_ctx->is_single_pass) { in cc_map_aead_request()
1086 } else { /* DOUBLE-PASS flow */ in cc_map_aead_request()
1095 * If ENCRYPT (non-inplace) in cc_map_aead_request()
1101 * If DECRYPT (non-inplace) in cc_map_aead_request()
1118 /* Mlli support -start building the MLLI according to the above in cc_map_aead_request()
1121 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || in cc_map_aead_request()
1122 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { in cc_map_aead_request()
1123 mlli_params->curr_pool = drvdata->mlli_buffs_pool; in cc_map_aead_request()
1130 areq_ctx->assoc.mlli_nents); in cc_map_aead_request()
1131 dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents); in cc_map_aead_request()
1132 dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents); in cc_map_aead_request()
1149 struct mlli_params *mlli_params = &areq_ctx->mlli_params; in cc_map_hash_request_final()
1155 …dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\… in cc_map_hash_request_final()
1156 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); in cc_map_hash_request_final()
1158 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; in cc_map_hash_request_final()
1159 mlli_params->curr_pool = NULL; in cc_map_hash_request_final()
1161 areq_ctx->in_nents = 0; in cc_map_hash_request_final()
1178 &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, in cc_map_hash_request_final()
1183 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { in cc_map_hash_request_final()
1184 memcpy(areq_ctx->buff_sg, src, in cc_map_hash_request_final()
1186 areq_ctx->buff_sg->length = nbytes; in cc_map_hash_request_final()
1187 areq_ctx->curr_sg = areq_ctx->buff_sg; in cc_map_hash_request_final()
1188 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; in cc_map_hash_request_final()
1190 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; in cc_map_hash_request_final()
1195 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { in cc_map_hash_request_final()
1196 mlli_params->curr_pool = drvdata->mlli_buffs_pool; in cc_map_hash_request_final()
1198 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, in cc_map_hash_request_final()
1199 0, true, &areq_ctx->mlli_nents); in cc_map_hash_request_final()
1205 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1); in cc_map_hash_request_final()
1206 dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n", in cc_map_hash_request_final()
1207 cc_dma_buf_type(areq_ctx->data_dma_buf_type)); in cc_map_hash_request_final()
1211 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); in cc_map_hash_request_final()
1215 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); in cc_map_hash_request_final()
1230 struct mlli_params *mlli_params = &areq_ctx->mlli_params; in cc_map_hash_request_update()
1239 …dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\… in cc_map_hash_request_update()
1240 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); in cc_map_hash_request_update()
1242 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; in cc_map_hash_request_update()
1243 mlli_params->curr_pool = NULL; in cc_map_hash_request_update()
1244 areq_ctx->curr_sg = NULL; in cc_map_hash_request_update()
1246 areq_ctx->in_nents = 0; in cc_map_hash_request_update()
1249 dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", in cc_map_hash_request_update()
1251 areq_ctx->in_nents = sg_nents_for_len(src, nbytes); in cc_map_hash_request_update()
1252 sg_copy_to_buffer(src, areq_ctx->in_nents, in cc_map_hash_request_update()
1259 *next_buff_cnt = total_in_len & (block_size - 1); in cc_map_hash_request_update()
1261 update_data_len = total_in_len - *next_buff_cnt; in cc_map_hash_request_update()
1268 dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n", in cc_map_hash_request_update()
1269 next_buff, (update_data_len - *curr_buff_cnt), in cc_map_hash_request_update()
1272 (update_data_len - *curr_buff_cnt), in cc_map_hash_request_update()
1288 rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), in cc_map_hash_request_update()
1289 DMA_TO_DEVICE, &areq_ctx->in_nents, in cc_map_hash_request_update()
1295 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { in cc_map_hash_request_update()
1297 memcpy(areq_ctx->buff_sg, src, in cc_map_hash_request_update()
1299 areq_ctx->buff_sg->length = update_data_len; in cc_map_hash_request_update()
1300 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; in cc_map_hash_request_update()
1301 areq_ctx->curr_sg = areq_ctx->buff_sg; in cc_map_hash_request_update()
1303 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; in cc_map_hash_request_update()
1307 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { in cc_map_hash_request_update()
1308 mlli_params->curr_pool = drvdata->mlli_buffs_pool; in cc_map_hash_request_update()
1310 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, in cc_map_hash_request_update()
1311 (update_data_len - *curr_buff_cnt), 0, true, in cc_map_hash_request_update()
1312 &areq_ctx->mlli_nents); in cc_map_hash_request_update()
1317 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); in cc_map_hash_request_update()
1322 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); in cc_map_hash_request_update()
1326 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); in cc_map_hash_request_update()
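
cc_map_hash_request_update() splits the buffered-plus-new input into a block-aligned chunk that is hashed now and a residue that is staged into the next buffer for the following update; block_size is a power of two, so the residue is a simple mask, mirroring source lines 1259/1261 above:

/* Sketch: block-aligned split of a hash update. */
static void split_hash_update(unsigned int buffered, unsigned int nbytes,
			      unsigned int block_size,
			      unsigned int *update_len, unsigned int *residue)
{
	unsigned int total = buffered + nbytes;

	*residue = total & (block_size - 1);	/* bytes carried forward */
	*update_len = total - *residue;		/* bytes hashed this call */
}
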
1340 if (areq_ctx->mlli_params.curr_pool) { in cc_unmap_hash_request()
1341 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", in cc_unmap_hash_request()
1342 &areq_ctx->mlli_params.mlli_dma_addr, in cc_unmap_hash_request()
1343 areq_ctx->mlli_params.mlli_virt_addr); in cc_unmap_hash_request()
1344 dma_pool_free(areq_ctx->mlli_params.curr_pool, in cc_unmap_hash_request()
1345 areq_ctx->mlli_params.mlli_virt_addr, in cc_unmap_hash_request()
1346 areq_ctx->mlli_params.mlli_dma_addr); in cc_unmap_hash_request()
1349 if (src && areq_ctx->in_nents) { in cc_unmap_hash_request()
1350 dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n", in cc_unmap_hash_request()
1353 areq_ctx->in_nents, DMA_TO_DEVICE); in cc_unmap_hash_request()
1357 dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n", in cc_unmap_hash_request()
1358 sg_virt(areq_ctx->buff_sg), in cc_unmap_hash_request()
1359 &sg_dma_address(areq_ctx->buff_sg), in cc_unmap_hash_request()
1360 sg_dma_len(areq_ctx->buff_sg)); in cc_unmap_hash_request()
1361 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); in cc_unmap_hash_request()
1368 areq_ctx->buff_index ^= 1; in cc_unmap_hash_request()
1377 drvdata->mlli_buffs_pool = in cc_buffer_mgr_init()
1383 if (!drvdata->mlli_buffs_pool) in cc_buffer_mgr_init()
1384 return -ENOMEM; in cc_buffer_mgr_init()
1391 dma_pool_destroy(drvdata->mlli_buffs_pool); in cc_buffer_mgr_fini()
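
cc_buffer_mgr_init() backs every MLLI table with one dma_pool sized for the largest possible table, and cc_buffer_mgr_fini() tears it down. The create/destroy pairing, sketched; the pool name, size, and alignment here are placeholders rather than the driver's exact constants:

#include <linux/dmapool.h>

/* Sketch: one pool of fixed-size MLLI tables, created at probe time. */
static struct dma_pool *create_mlli_pool(struct device *dev,
					 size_t table_size, size_t align)
{
	/* args: name, device, block size, alignment, boundary (0 = none) */
	return dma_pool_create("mlli_tables", dev, table_size, align, 0);
}

static void teardown_mlli_pool(struct dma_pool *pool)
{
	dma_pool_destroy(pool);		/* NULL-safe */
}
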