Lines Matching +full:imx23 +full:- +full:dcp

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
8 #include <linux/dma-mapping.h>
18 #include <soc/fsl/dcp.h>
47 /* DCP DMA descriptor. */
71 struct dcp {
96 /* SHA Hash-specific context */
101 /* Crypto-specific context */
125 * There can be only one instance of the MXS DCP due to the design of the Linux Crypto API.
128 static struct dcp *global_sdcp;
130 /* DCP register layout. */
179 struct dcp *sdcp = global_sdcp; in mxs_dcp_start_dma()
180 const int chan = actx->chan; in mxs_dcp_start_dma()
183 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; in mxs_dcp_start_dma()
184 dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc), in mxs_dcp_start_dma()
187 dma_err = dma_mapping_error(sdcp->dev, desc_phys); in mxs_dcp_start_dma()
191 reinit_completion(&sdcp->completion[chan]); in mxs_dcp_start_dma()
194 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan)); in mxs_dcp_start_dma()
197 writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan)); in mxs_dcp_start_dma()
200 writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan)); in mxs_dcp_start_dma()
202 ret = wait_for_completion_timeout(&sdcp->completion[chan], in mxs_dcp_start_dma()
205 dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n", in mxs_dcp_start_dma()
206 chan, readl(sdcp->base + MXS_DCP_STAT)); in mxs_dcp_start_dma()
207 return -ETIMEDOUT; in mxs_dcp_start_dma()
210 stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan)); in mxs_dcp_start_dma()
212 dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n", in mxs_dcp_start_dma()
214 return -EINVAL; in mxs_dcp_start_dma()
217 dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE); in mxs_dcp_start_dma()
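Pulled together, the matched lines above form one kickoff sequence per channel: clear stale status, point the channel at the descriptor, bump the semaphore, wait. A condensed sketch using the driver's names; error paths are trimmed and the one-second timeout is an assumption, since the matched lines do not show the real value:

	static int dcp_kick_channel(struct dcp *sdcp, int chan, dma_addr_t desc_phys)
	{
		unsigned long ret;

		reinit_completion(&sdcp->completion[chan]);

		/* Clear stale channel status, then hand over the descriptor. */
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
		writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

		/* Incrementing the channel semaphore starts the engine. */
		writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

		ret = wait_for_completion_timeout(&sdcp->completion[chan],
						  msecs_to_jiffies(1000));	/* assumed timeout */
		return ret ? 0 : -ETIMEDOUT;
	}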
229 struct dcp *sdcp = global_sdcp; in mxs_dcp_run_aes()
230 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; in mxs_dcp_run_aes()
232 bool key_referenced = actx->key_referenced; in mxs_dcp_run_aes()
236 key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key + AES_KEYSIZE_128, in mxs_dcp_run_aes()
239 key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, in mxs_dcp_run_aes()
241 ret = dma_mapping_error(sdcp->dev, key_phys); in mxs_dcp_run_aes()
245 src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, in mxs_dcp_run_aes()
247 ret = dma_mapping_error(sdcp->dev, src_phys); in mxs_dcp_run_aes()
251 dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, in mxs_dcp_run_aes()
253 ret = dma_mapping_error(sdcp->dev, dst_phys); in mxs_dcp_run_aes()
257 if (actx->fill % AES_BLOCK_SIZE) { in mxs_dcp_run_aes()
258 dev_err(sdcp->dev, "Invalid block size!\n"); in mxs_dcp_run_aes()
259 ret = -EINVAL; in mxs_dcp_run_aes()
264 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | in mxs_dcp_run_aes()
270 desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; in mxs_dcp_run_aes()
273 desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; in mxs_dcp_run_aes()
275 if (rctx->enc) in mxs_dcp_run_aes()
276 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; in mxs_dcp_run_aes()
278 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; in mxs_dcp_run_aes()
280 desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; in mxs_dcp_run_aes()
282 if (rctx->ecb) in mxs_dcp_run_aes()
283 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; in mxs_dcp_run_aes()
285 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; in mxs_dcp_run_aes()
288 desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT; in mxs_dcp_run_aes()
290 desc->next_cmd_addr = 0; in mxs_dcp_run_aes()
291 desc->source = src_phys; in mxs_dcp_run_aes()
292 desc->destination = dst_phys; in mxs_dcp_run_aes()
293 desc->size = actx->fill; in mxs_dcp_run_aes()
294 desc->payload = key_phys; in mxs_dcp_run_aes()
295 desc->status = 0; in mxs_dcp_run_aes()
300 dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); in mxs_dcp_run_aes()
302 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); in mxs_dcp_run_aes()
305 dma_unmap_single(sdcp->dev, key_phys, AES_KEYSIZE_128, in mxs_dcp_run_aes()
308 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, in mxs_dcp_run_aes()
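The asymmetric map and unmap sizes above (AES_KEYSIZE_128 for referenced keys, 2 * AES_KEYSIZE_128 otherwise) imply a packed payload. A reading of the layout, as a comment sketch rather than anything shown verbatim in the matches:

	/*
	 * Coherent-area payload as implied by the dma_map_single() sizes:
	 *
	 *   aes_key: [ 16-byte AES-128 key ][ 16-byte IV (CBC only) ]
	 *
	 * For a referenced key the key bytes live in a hardware slot, so
	 * only the IV half (aes_key + AES_KEYSIZE_128) is mapped, and
	 * aes_key[0] doubles as the slot number shifted into CONTROL1's
	 * KEY_SELECT field.
	 */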
315 struct dcp *sdcp = global_sdcp; in mxs_dcp_aes_block_crypt()
318 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); in mxs_dcp_aes_block_crypt()
321 struct scatterlist *dst = req->dst; in mxs_dcp_aes_block_crypt()
322 struct scatterlist *src = req->src; in mxs_dcp_aes_block_crypt()
326 uint8_t *in_buf = sdcp->coh->aes_in_buf; in mxs_dcp_aes_block_crypt()
327 uint8_t *out_buf = sdcp->coh->aes_out_buf; in mxs_dcp_aes_block_crypt()
333 uint8_t *key = sdcp->coh->aes_key; in mxs_dcp_aes_block_crypt()
340 actx->fill = 0; in mxs_dcp_aes_block_crypt()
343 memcpy(key, actx->key, actx->key_len); in mxs_dcp_aes_block_crypt()
345 if (!rctx->ecb) { in mxs_dcp_aes_block_crypt()
347 memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128); in mxs_dcp_aes_block_crypt()
354 for_each_sg(req->src, src, sg_nents(req->src), i) { in mxs_dcp_aes_block_crypt()
358 limit_hit = tlen > req->cryptlen; in mxs_dcp_aes_block_crypt()
361 len = req->cryptlen - (tlen - len); in mxs_dcp_aes_block_crypt()
364 if (actx->fill + len > out_off) in mxs_dcp_aes_block_crypt()
365 clen = out_off - actx->fill; in mxs_dcp_aes_block_crypt()
369 memcpy(in_buf + actx->fill, src_buf, clen); in mxs_dcp_aes_block_crypt()
370 len -= clen; in mxs_dcp_aes_block_crypt()
372 actx->fill += clen; in mxs_dcp_aes_block_crypt()
378 if (actx->fill == out_off || sg_is_last(src) || in mxs_dcp_aes_block_crypt()
386 actx->fill, dst_off); in mxs_dcp_aes_block_crypt()
387 dst_off += actx->fill; in mxs_dcp_aes_block_crypt()
388 last_out_len = actx->fill; in mxs_dcp_aes_block_crypt()
389 actx->fill = 0; in mxs_dcp_aes_block_crypt()
398 if (!rctx->ecb) { in mxs_dcp_aes_block_crypt()
399 if (rctx->enc) in mxs_dcp_aes_block_crypt()
400 memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE), in mxs_dcp_aes_block_crypt()
403 memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE), in mxs_dcp_aes_block_crypt()
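The two memcpy() calls above implement CBC chaining across requests: the next IV must be the last ciphertext block, which sits in the output buffer when encrypting but in the input buffer when decrypting. The same rule as a standalone helper, a sketch rather than driver code:

	static void cbc_save_next_iv(u8 *iv, const u8 *in_buf, const u8 *out_buf,
				     size_t last_out_len, bool enc)
	{
		/* Ciphertext is the output when encrypting, the input when
		 * decrypting. */
		const u8 *last = (enc ? out_buf : in_buf) +
				 last_out_len - AES_BLOCK_SIZE;

		memcpy(iv, last, AES_BLOCK_SIZE);
	}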
412 struct dcp *sdcp = global_sdcp; in dcp_chan_thread_aes()
423 spin_lock(&sdcp->lock[chan]); in dcp_chan_thread_aes()
424 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_aes()
425 arq = crypto_dequeue_request(&sdcp->queue[chan]); in dcp_chan_thread_aes()
426 spin_unlock(&sdcp->lock[chan]); in dcp_chan_thread_aes()
436 crypto_request_complete(backlog, -EINPROGRESS); in dcp_chan_thread_aes()
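The lock/dequeue/unlock triple plus the -EINPROGRESS backlog completion is the standard crypto_queue consumer pattern; each channel owns a kthread draining its own queue. The loop's shape, sketched with the park/sleep logic of the real thread omitted:

	struct crypto_async_request *backlog, *arq;
	int ret;

	while (!kthread_should_stop()) {
		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		/* Tell a backlogged submitter its request is now queued. */
		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}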
454 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); in mxs_dcp_block_fallback()
455 skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, in mxs_dcp_block_fallback()
456 req->base.complete, req->base.data); in mxs_dcp_block_fallback()
457 skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, in mxs_dcp_block_fallback()
458 req->cryptlen, req->iv); in mxs_dcp_block_fallback()
461 ret = crypto_skcipher_encrypt(&rctx->fallback_req); in mxs_dcp_block_fallback()
463 ret = crypto_skcipher_decrypt(&rctx->fallback_req); in mxs_dcp_block_fallback()
470 struct dcp *sdcp = global_sdcp; in mxs_dcp_aes_enqueue()
471 struct crypto_async_request *arq = &req->base; in mxs_dcp_aes_enqueue()
472 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); in mxs_dcp_aes_enqueue()
476 if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced)) in mxs_dcp_aes_enqueue()
479 rctx->enc = enc; in mxs_dcp_aes_enqueue()
480 rctx->ecb = ecb; in mxs_dcp_aes_enqueue()
481 actx->chan = DCP_CHAN_CRYPTO; in mxs_dcp_aes_enqueue()
483 spin_lock(&sdcp->lock[actx->chan]); in mxs_dcp_aes_enqueue()
484 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); in mxs_dcp_aes_enqueue()
485 spin_unlock(&sdcp->lock[actx->chan]); in mxs_dcp_aes_enqueue()
487 wake_up_process(sdcp->thread[actx->chan]); in mxs_dcp_aes_enqueue()
522 actx->key_len = len; in mxs_dcp_aes_setkey()
523 actx->key_referenced = false; in mxs_dcp_aes_setkey()
525 memcpy(actx->key, key, len); in mxs_dcp_aes_setkey()
531 * but is supported by the in-kernel software implementation, we use the software fallback. in mxs_dcp_aes_setkey()
534 crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); in mxs_dcp_aes_setkey()
535 crypto_skcipher_set_flags(actx->fallback, in mxs_dcp_aes_setkey()
536 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); in mxs_dcp_aes_setkey()
537 return crypto_skcipher_setkey(actx->fallback, key, len); in mxs_dcp_aes_setkey()
546 return -EINVAL; in mxs_dcp_aes_setrefkey()
555 memcpy(actx->key, key, len); in mxs_dcp_aes_setrefkey()
556 actx->key_len = len; in mxs_dcp_aes_setrefkey()
557 actx->key_referenced = true; in mxs_dcp_aes_setrefkey()
560 return -EINVAL; in mxs_dcp_aes_setrefkey()
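For the paes variants the setkey blob is not key material: a single byte selects a hardware key slot, later shifted into CONTROL1's KEY_SELECT field in mxs_dcp_run_aes() above. A sketch of the accepted values, assuming the slot macros from <soc/fsl/dcp.h>:

	switch (key[0]) {
	case DCP_PAES_KEY_SLOT0:
	case DCP_PAES_KEY_SLOT1:
	case DCP_PAES_KEY_SLOT2:
	case DCP_PAES_KEY_SLOT3:
	case DCP_PAES_KEY_UNIQUE:
	case DCP_PAES_KEY_OTP:
		/* Remember the slot selector, not a key. */
		memcpy(actx->key, key, len);
		actx->key_len = len;
		actx->key_referenced = true;
		return 0;
	}
	return -EINVAL;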
576 actx->fallback = blk; in mxs_dcp_aes_fallback_init_tfm()
586 crypto_free_skcipher(actx->fallback); in mxs_dcp_aes_fallback_exit_tfm()
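The fallback tfm freed above is typically obtained in the init path by asking the API for any other provider of the same algorithm name, skipping implementations that themselves need a fallback. A sketch of that allocation:

	static int mxs_dcp_fallback_init_sketch(struct crypto_skcipher *tfm)
	{
		const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
		struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
		struct crypto_skcipher *blk;

		blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(blk))
			return PTR_ERR(blk);

		actx->fallback = blk;
		return 0;
	}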
601 struct dcp *sdcp = global_sdcp; in mxs_dcp_run_sha()
607 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; in mxs_dcp_run_sha()
610 dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, in mxs_dcp_run_sha()
613 ret = dma_mapping_error(sdcp->dev, buf_phys); in mxs_dcp_run_sha()
618 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | in mxs_dcp_run_sha()
621 if (rctx->init) in mxs_dcp_run_sha()
622 desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT; in mxs_dcp_run_sha()
624 desc->control1 = actx->alg; in mxs_dcp_run_sha()
625 desc->next_cmd_addr = 0; in mxs_dcp_run_sha()
626 desc->source = buf_phys; in mxs_dcp_run_sha()
627 desc->destination = 0; in mxs_dcp_run_sha()
628 desc->size = actx->fill; in mxs_dcp_run_sha()
629 desc->payload = 0; in mxs_dcp_run_sha()
630 desc->status = 0; in mxs_dcp_run_sha()
635 if (rctx->init && rctx->fini && desc->size == 0) { in mxs_dcp_run_sha()
638 (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ? in mxs_dcp_run_sha()
640 memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize); in mxs_dcp_run_sha()
646 if (rctx->fini) { in mxs_dcp_run_sha()
647 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf, in mxs_dcp_run_sha()
649 ret = dma_mapping_error(sdcp->dev, digest_phys); in mxs_dcp_run_sha()
653 desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; in mxs_dcp_run_sha()
654 desc->payload = digest_phys; in mxs_dcp_run_sha()
659 if (rctx->fini) in mxs_dcp_run_sha()
660 dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ, in mxs_dcp_run_sha()
664 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); in mxs_dcp_run_sha()
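One detail worth flagging in the run path: the engine cannot hash zero bytes, so an init+fini request with an empty payload is answered from the kernel's precomputed empty-message digests (sha1_zero_message_hash / sha256_zero_message_hash) instead of from hardware. In context, as a sketch:

	if (rctx->init && rctx->fini && desc->size == 0) {
		bool is_sha1 = actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
		const uint8_t *sha_buf = is_sha1 ? sha1_zero_message_hash :
						   sha256_zero_message_hash;

		/* Hand back the canned digest; nothing for the DCP to do. */
		memcpy(sdcp->coh->sha_out_buf, sha_buf,
		       is_sha1 ? SHA1_DIGEST_SIZE : SHA256_DIGEST_SIZE);
		return 0;	/* sketch: the real code unmaps buffers first */
	}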
671 struct dcp *sdcp = global_sdcp; in dcp_sha_req_to_buf()
679 uint8_t *in_buf = sdcp->coh->sha_in_buf; in dcp_sha_req_to_buf()
680 uint8_t *out_buf = sdcp->coh->sha_out_buf; in dcp_sha_req_to_buf()
687 int fin = rctx->fini; in dcp_sha_req_to_buf()
689 rctx->fini = 0; in dcp_sha_req_to_buf()
691 src = req->src; in dcp_sha_req_to_buf()
692 len = req->nbytes; in dcp_sha_req_to_buf()
695 if (actx->fill + len > DCP_BUF_SZ) in dcp_sha_req_to_buf()
696 clen = DCP_BUF_SZ - actx->fill; in dcp_sha_req_to_buf()
700 scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, in dcp_sha_req_to_buf()
703 len -= clen; in dcp_sha_req_to_buf()
705 actx->fill += clen; in dcp_sha_req_to_buf()
711 if (len && actx->fill == DCP_BUF_SZ) { in dcp_sha_req_to_buf()
715 actx->fill = 0; in dcp_sha_req_to_buf()
716 rctx->init = 0; in dcp_sha_req_to_buf()
721 rctx->fini = 1; in dcp_sha_req_to_buf()
724 if (!req->result) in dcp_sha_req_to_buf()
725 return -EINVAL; in dcp_sha_req_to_buf()
731 actx->fill = 0; in dcp_sha_req_to_buf()
734 for (i = 0; i < halg->digestsize; i++) in dcp_sha_req_to_buf()
735 req->result[i] = out_buf[halg->digestsize - i - 1]; in dcp_sha_req_to_buf()
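The final loop exists because the DCP writes its digest back-to-front; the driver reverses it byte-by-byte into req->result. The same fix-up as a small helper, for clarity:

	static void dcp_reverse_digest(uint8_t *dst, const uint8_t *src,
				       unsigned int len)
	{
		unsigned int i;

		/* out_buf holds the digest in reverse byte order; flip it. */
		for (i = 0; i < len; i++)
			dst[i] = src[len - i - 1];
	}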
743 struct dcp *sdcp = global_sdcp; in dcp_chan_thread_sha()
753 spin_lock(&sdcp->lock[chan]); in dcp_chan_thread_sha()
754 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_sha()
755 arq = crypto_dequeue_request(&sdcp->queue[chan]); in dcp_chan_thread_sha()
756 spin_unlock(&sdcp->lock[chan]); in dcp_chan_thread_sha()
766 crypto_request_complete(backlog, -EINPROGRESS); in dcp_chan_thread_sha()
790 if (strcmp(halg->base.cra_name, "sha1") == 0) in dcp_sha_init()
791 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1; in dcp_sha_init()
793 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256; in dcp_sha_init()
795 actx->fill = 0; in dcp_sha_init()
796 actx->hot = 0; in dcp_sha_init()
797 actx->chan = DCP_CHAN_HASH_SHA; in dcp_sha_init()
799 mutex_init(&actx->mutex); in dcp_sha_init()
806 struct dcp *sdcp = global_sdcp; in dcp_sha_update_fx()
818 if (!req->nbytes && !fini) in dcp_sha_update_fx()
821 mutex_lock(&actx->mutex); in dcp_sha_update_fx()
823 rctx->fini = fini; in dcp_sha_update_fx()
825 if (!actx->hot) { in dcp_sha_update_fx()
826 actx->hot = 1; in dcp_sha_update_fx()
827 rctx->init = 1; in dcp_sha_update_fx()
830 spin_lock(&sdcp->lock[actx->chan]); in dcp_sha_update_fx()
831 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); in dcp_sha_update_fx()
832 spin_unlock(&sdcp->lock[actx->chan]); in dcp_sha_update_fx()
834 wake_up_process(sdcp->thread[actx->chan]); in dcp_sha_update_fx()
835 mutex_unlock(&actx->mutex); in dcp_sha_update_fx()
847 ahash_request_set_crypt(req, NULL, req->result, 0); in dcp_sha_final()
848 req->nbytes = 0; in dcp_sha_final()
877 memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx)); in dcp_sha_import()
878 memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx)); in dcp_sha_import()
890 memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx)); in dcp_sha_export()
891 memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx)); in dcp_sha_export()
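import()/export() let a partially-hashed request be frozen and resumed; the blob is simply the two contexts packed back to back, matching the memcpy() pairs above. Its shape, sketched:

	struct dcp_export_state {
		struct dcp_sha_req_ctx req_ctx;   /* per-request state */
		struct dcp_async_ctx async_ctx;   /* per-transform state */
	};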
911 .base.cra_driver_name = "ecb-aes-dcp",
929 .base.cra_driver_name = "cbc-aes-dcp",
948 .base.cra_driver_name = "ecb-paes-dcp",
964 .base.cra_driver_name = "cbc-paes-dcp",
996 .cra_driver_name = "sha1-dcp",
1022 .cra_driver_name = "sha256-dcp",
1036 struct dcp *sdcp = context; in mxs_dcp_irq()
1040 stat = readl(sdcp->base + MXS_DCP_STAT); in mxs_dcp_irq()
1046 writel(stat, sdcp->base + MXS_DCP_STAT_CLR); in mxs_dcp_irq()
1051 complete(&sdcp->completion[i]); in mxs_dcp_irq()
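The interrupt handler only acknowledges and signals; all real work stays in the channel threads. Its full shape around the three matched lines, sketched with the IRQ-mask macro name being an assumption:

	static irqreturn_t mxs_dcp_irq_sketch(int irq, void *context)
	{
		struct dcp *sdcp = context;
		uint32_t stat;
		int i;

		stat = readl(sdcp->base + MXS_DCP_STAT);
		stat &= MXS_DCP_STAT_IRQ_MASK;	/* assumed mask name */
		if (!stat)
			return IRQ_NONE;

		/* Ack, then wake the waiters of every finished channel. */
		writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
		for (i = 0; i < DCP_MAX_CHANS; i++)
			if (stat & (1 << i))
				complete(&sdcp->completion[i]);

		return IRQ_HANDLED;
	}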
1058 struct device *dev = &pdev->dev; in mxs_dcp_probe()
1059 struct dcp *sdcp = NULL; in mxs_dcp_probe()
1064 dev_err(dev, "Only one DCP instance allowed!\n"); in mxs_dcp_probe()
1065 return -ENODEV; in mxs_dcp_probe()
1078 return -ENOMEM; in mxs_dcp_probe()
1080 sdcp->dev = dev; in mxs_dcp_probe()
1081 sdcp->base = devm_platform_ioremap_resource(pdev, 0); in mxs_dcp_probe()
1082 if (IS_ERR(sdcp->base)) in mxs_dcp_probe()
1083 return PTR_ERR(sdcp->base); in mxs_dcp_probe()
1087 "dcp-vmi-irq", sdcp); in mxs_dcp_probe()
1089 dev_err(dev, "Failed to claim DCP VMI IRQ!\n"); in mxs_dcp_probe()
1094 "dcp-irq", sdcp); in mxs_dcp_probe()
1096 dev_err(dev, "Failed to claim DCP IRQ!\n"); in mxs_dcp_probe()
1101 sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT, in mxs_dcp_probe()
1103 if (!sdcp->coh) in mxs_dcp_probe()
1104 return -ENOMEM; in mxs_dcp_probe()
1106 /* Re-align the structure so it fits the DCP constraints. */ in mxs_dcp_probe()
1107 sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT); in mxs_dcp_probe()
1109 /* The DCP clock is optional; it is only used on some SoCs. */ in mxs_dcp_probe()
1110 sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp"); in mxs_dcp_probe()
1111 if (IS_ERR(sdcp->dcp_clk)) in mxs_dcp_probe()
1112 return PTR_ERR(sdcp->dcp_clk); in mxs_dcp_probe()
1114 /* Restart the DCP block. */ in mxs_dcp_probe()
1115 ret = stmp_reset_block(sdcp->base); in mxs_dcp_probe()
1124 sdcp->base + MXS_DCP_CTRL); in mxs_dcp_probe()
1126 /* Enable all DCP DMA channels. */ in mxs_dcp_probe()
1128 sdcp->base + MXS_DCP_CHANNELCTRL); in mxs_dcp_probe()
1133 * inadvertently enabled, the DCP will return an error instead of in mxs_dcp_probe()
1134 * trashing good memory. The DCP DMA cannot access ROM, so any ROM in mxs_dcp_probe()
1137 writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT); in mxs_dcp_probe()
1139 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i)); in mxs_dcp_probe()
1140 writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR); in mxs_dcp_probe()
1147 spin_lock_init(&sdcp->lock[i]); in mxs_dcp_probe()
1148 init_completion(&sdcp->completion[i]); in mxs_dcp_probe()
1149 crypto_init_queue(&sdcp->queue[i], 50); in mxs_dcp_probe()
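Everything stateful is per channel: one spinlock, one completion, one 50-entry crypto queue, all initialized before the worker threads start. The three matched lines sit in a loop of roughly this shape (DCP_MAX_CHANS assumed from the driver's naming):

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}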
1153 sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha, in mxs_dcp_probe()
1155 if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { in mxs_dcp_probe()
1157 ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); in mxs_dcp_probe()
1161 sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, in mxs_dcp_probe()
1163 if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) { in mxs_dcp_probe()
1165 ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]); in mxs_dcp_probe()
1170 sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1); in mxs_dcp_probe()
1172 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) { in mxs_dcp_probe()
1182 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) { in mxs_dcp_probe()
1191 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) { in mxs_dcp_probe()
1203 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) in mxs_dcp_probe()
1207 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) in mxs_dcp_probe()
1211 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); in mxs_dcp_probe()
1214 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); in mxs_dcp_probe()
1221 struct dcp *sdcp = platform_get_drvdata(pdev); in mxs_dcp_remove()
1223 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) in mxs_dcp_remove()
1226 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) in mxs_dcp_remove()
1229 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) in mxs_dcp_remove()
1232 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); in mxs_dcp_remove()
1233 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); in mxs_dcp_remove()
1241 { .compatible = "fsl,imx23-dcp", .data = NULL, },
1242 { .compatible = "fsl,imx28-dcp", .data = NULL, },
1252 .name = "mxs-dcp",
1260 MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1262 MODULE_ALIAS("platform:mxs-dcp");