Lines Matching +full:compound +full:- +full:device
1 // SPDX-License-Identifier: GPL-2.0
6 * Copyright 2013-2016 Freescale Semiconductor, Inc.
7 * Copyright 2016-2017, 2019-2020 NXP
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
33 * so that resources used by the in-flight buffers do not become a memory hog.
42 * caam_napi - struct holding CAAM NAPI-related params
52 * caam_qi_pcpu_priv - percpu private data structure to main list of pending
68 * caam_qi_priv - CAAM QI backend private params
78 * This is written by only one core - the one that initialized the CGR - and
89 * being processed. This could be added by the dpaa-ethernet driver.
92 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here
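
The struct kernel-doc fragments above (caam_napi, caam_qi_pcpu_priv, caam_qi_priv), combined with the accesses that appear in later matches (raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev, per_cpu(pcpu_qipriv.rsp_fq, cpu), np->p, priv->cgr, priv->domain), imply roughly the per-CPU layout below. This is a reconstruction for orientation only; member order, the global instance name qipriv and anything not visible in the matches are assumptions.

/*
 * Sketch inferred from the matched comments and accesses, not the verbatim
 * definitions in the driver. The sketches in this listing assume the usual
 * headers: <soc/fsl/qman.h>, <linux/netdevice.h>, <linux/dma-mapping.h> and
 * the CAAM-local qi.h / desc_constr.h / regs.h.
 */
struct caam_napi {
	struct napi_struct irqtask;	/* NAPI context used to drain the portal */
	struct qman_portal *p;		/* portal that raised the DQRR interrupt */
};

struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;	/* this CPU's NAPI params */
	struct net_device *net_dev;	/* dummy netdev backing the NAPI context */
	struct qman_fq *rsp_fq;		/* this CPU's response frame queue */
};

static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);

struct caam_qi_priv {
	struct qman_cgr cgr;		/* congestion group covering the request FQs */
	struct iommu_domain *domain;	/* used by caam_iova_to_virt() lookups */
};

static struct caam_qi_priv qipriv;	/* instance name assumed */
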
106 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) in caam_qi_enqueue()
114 qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1])); in caam_qi_enqueue()
116 addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt), in caam_qi_enqueue()
120 return -EIO; in caam_qi_enqueue()
125 refcount_inc(&req->drv_ctx->refcnt); in caam_qi_enqueue()
126 ret = qman_enqueue(req->drv_ctx->req_fq, &fd); in caam_qi_enqueue()
130 refcount_dec(&req->drv_ctx->refcnt); in caam_qi_enqueue()
131 if (ret != -EBUSY) in caam_qi_enqueue()
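
Read together, the caam_qi_enqueue() matches outline the submission path: the frame descriptor is marked compound with the length taken from the second S/G entry, the fd_sgt[] pair is DMA-mapped, the driver-context refcount is raised while the request is in flight, and the enqueue is retried only on -EBUSY. A condensed sketch under those assumptions; the retry budget, exact loop shape and log strings are not taken from the driver.

int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret, retries = 100;			/* retry budget assumed */

	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	do {
		/* Count the request as in flight before QMan can deliver it */
		refcount_inc(&req->drv_ctx->refcnt);

		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret))
			return 0;

		/* Enqueue failed, so the request never became in-flight */
		refcount_dec(&req->drv_ctx->refcnt);
		if (ret != -EBUSY)
			break;
	} while (--retries);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
	dma_unmap_single(qidev, addr, sizeof(req->fd_sgt), DMA_BIDIRECTIONAL);
	return ret;
}
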
147 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev); in caam_fq_ern_cb()
150 fd = &msg->ern.fd; in caam_fq_ern_cb()
152 drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); in caam_fq_ern_cb()
159 refcount_dec(&drv_req->drv_ctx->refcnt); in caam_fq_ern_cb()
162 dev_err(qidev, "Non-compound FD from CAAM\n"); in caam_fq_ern_cb()
166 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), in caam_fq_ern_cb()
167 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); in caam_fq_ern_cb()
169 if (fd->status) in caam_fq_ern_cb()
170 drv_req->cbk(drv_req, be32_to_cpu(fd->status)); in caam_fq_ern_cb()
172 drv_req->cbk(drv_req, JRSTA_SSRC_QI); in caam_fq_ern_cb()
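
The caam_fq_ern_cb() matches cover the enqueue-rejection (ERN) path: a frame bounced back by QMan is translated to its caam_drv_req, the in-flight reference taken at enqueue time is dropped, the compound S/G table is unmapped, and the completion callback runs with either the hardware status or a synthesised JRSTA_SSRC_QI status. A sketch, reusing the private data reconstructed above:

static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd = &msg->ern.fd;

	/* Translate the rejected FD back to the originating request */
	drv_req = caam_iova_to_virt(qipriv.domain, qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev, "Can't find original request for CAAM response\n");
		return;
	}

	/* The request never reached CAAM: drop its in-flight reference */
	refcount_dec(&drv_req->drv_ctx->refcnt);

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	/*
	 * Forward the hardware status if QMan supplied one; otherwise
	 * synthesise a QI-source status so the caller still sees an error.
	 */
	if (fd->status)
		drv_req->cbk(drv_req, be32_to_cpu(fd->status));
	else
		drv_req->cbk(drv_req, JRSTA_SSRC_QI);
}
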
175 static struct qman_fq *create_caam_req_fq(struct device *qidev, in create_caam_req_fq()
186 return ERR_PTR(-ENOMEM); in create_caam_req_fq()
188 req_fq->cb.ern = caam_fq_ern_cb; in create_caam_req_fq()
189 req_fq->cb.fqs = NULL; in create_caam_req_fq()
214 dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid, in create_caam_req_fq()
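
The create_caam_req_fq() matches only show the allocation failure, the ERN callback hook-up and the final dev_dbg(); the frame-queue programming in between is sketched below from the generic QMan API. Grounded in the matches: rejected frames go to caam_fq_ern_cb() and the caller passes the response FQ plus the DMA address of the pre-header/shared descriptor. The initfq write-enable bits, work-queue priority and error handling are assumptions.

static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc, int fq_sched_flag)
{
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;
	int ret;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	/* Rejected enqueues come back through the ERN handler above */
	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		kfree(req_fq);
		return ERR_PTR(ret);
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
				   QM_INITFQ_WE_CONTEXTB);
	/* Deliver requests to the CAAM direct-connect portal... */
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	/* ...point CAAM at the pre-header + shared descriptor... */
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	/* ...and record which FQ carries the responses back. */
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		qman_destroy_fq(req_fq);
		kfree(req_fq);
		return ERR_PTR(ret);
	}

	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		smp_processor_id());
	return req_fq;
}
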
225 static int empty_retired_fq(struct device *qidev, struct qman_fq *fq) in empty_retired_fq()
234 dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid); in empty_retired_fq()
243 } while (fq->flags & QMAN_FQ_STATE_NE); in empty_retired_fq()
248 static int kill_fq(struct device *qidev, struct qman_fq *fq) in kill_fq()
267 } while (fq->state != qman_fq_state_retired); in kill_fq()
269 WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS); in kill_fq()
270 WARN_ON(fq->flags & QMAN_FQ_STATE_ORL); in kill_fq()
274 if (fq->flags & QMAN_FQ_STATE_NE) { in kill_fq()
278 fq->fqid); in kill_fq()
285 dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid); in kill_fq()
313 if (refcount_read(&drv_ctx->refcnt) == 1) in empty_caam_fq()
317 } while (--retries); in empty_caam_fq()
320 dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n", in empty_caam_fq()
321 refcount_read(&drv_ctx->refcnt), fq->fqid); in empty_caam_fq()
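
The empty_caam_fq() matches show how the driver decides a retiring request FQ has fully drained: the drv_ctx refcount drops back to its initial value of 1 once CAAM has returned every in-flight frame, and a one-time warning is printed if it never does. A sketch of that wait; the initial QMan-side frame-count poll, the retry budget and the sleep interval are assumptions rather than matched lines.

static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
{
	struct qm_mcr_queryfq_np np;
	int retries = 10;		/* retry budget assumed */
	int ret;

	/* First wait until QMan itself reports the FQ empty (assumed step) */
	do {
		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;
		if (!qm_mcr_np_get(&np, frm_cnt))
			break;
		msleep(20);
	} while (1);

	/*
	 * Then wait for CAAM to hand back the frames it already consumed:
	 * the refcount returns to 1 (the reference taken at context init)
	 * once nothing is left in flight.
	 */
	do {
		if (refcount_read(&drv_ctx->refcnt) == 1)
			break;
		msleep(20);
	} while (--retries);

	if (!retries)
		dev_warn_once(drv_ctx->qidev,
			      "%d frames from FQID %u still pending in CAAM\n",
			      refcount_read(&drv_ctx->refcnt), fq->fqid);

	return 0;
}
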
331 struct device *qidev = drv_ctx->qidev; in caam_drv_ctx_update()
336 return -EINVAL; in caam_drv_ctx_update()
340 old_fq = drv_ctx->req_fq; in caam_drv_ctx_update()
343 new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq, in caam_drv_ctx_update()
344 drv_ctx->context_a, 0); in caam_drv_ctx_update()
351 drv_ctx->req_fq = new_fq; in caam_drv_ctx_update()
359 drv_ctx->req_fq = old_fq; in caam_drv_ctx_update()
368 * Re-initialise pre-header. Set RSLS and SDLEN. in caam_drv_ctx_update()
371 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | in caam_drv_ctx_update()
373 drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); in caam_drv_ctx_update()
374 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); in caam_drv_ctx_update()
375 dma_sync_single_for_device(qidev, drv_ctx->context_a, in caam_drv_ctx_update()
376 sizeof(drv_ctx->sh_desc) + in caam_drv_ctx_update()
377 sizeof(drv_ctx->prehdr), in caam_drv_ctx_update()
390 drv_ctx->req_fq = old_fq; in caam_drv_ctx_update()
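
Across the caam_drv_ctx_update() matches the shared-descriptor hot-swap is visible: create a parked replacement request FQ, publish it in drv_ctx->req_fq so new requests stop using the old FQ, drain the old one, rewrite the pre-header (RSLS and SDLEN) and shared descriptor, sync them to the device, then schedule the new FQ and only afterwards retire the old one, reverting to the old FQ on failure. A compressed sketch of that sequence; the size check, the exact point where the old FQ is killed and the log strings are assumptions.

int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	struct device *qidev = drv_ctx->qidev;
	struct qman_fq *old_fq, *new_fq;
	int ret;

	/* Reject descriptors that do not fit the context buffer (check assumed) */
	if (desc_bytes(sh_desc) > sizeof(drv_ctx->sh_desc))
		return -EINVAL;

	/* 1. Create a replacement request FQ, initially parked (not scheduled) */
	old_fq = drv_ctx->req_fq;
	new_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, drv_ctx->context_a, 0);
	if (IS_ERR(new_fq))
		return PTR_ERR(new_fq);

	/* 2. Publish it so new requests stop landing on the old FQ */
	drv_ctx->req_fq = new_fq;

	/* 3. Drain the old FQ and wait for CAAM to finish the in-flight jobs */
	ret = empty_caam_fq(old_fq, drv_ctx);
	if (ret) {
		drv_ctx->req_fq = old_fq;	/* revert: keep using the old FQ */
		kill_fq(qidev, new_fq);
		return ret;
	}

	/* 4. Re-initialise the pre-header (RSLS + SDLEN) and shared descriptor */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   desc_len(sh_desc));
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* 5. Schedule the new FQ; only then retire the old one */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Failed to schedule new CAAM request FQ: %d\n", ret);
		drv_ctx->req_fq = old_fq;	/* best-effort revert */
		kill_fq(qidev, new_fq);
		return ret;
	}

	if (kill_fq(qidev, old_fq))
		dev_warn(qidev, "Old CAAM request FQ kill failed\n");

	return 0;
}
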
402 struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, in caam_drv_ctx_init()
416 return ERR_PTR(-EINVAL); in caam_drv_ctx_init()
421 return ERR_PTR(-ENOMEM); in caam_drv_ctx_init()
424 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor in caam_drv_ctx_init()
425 * and dma-map them. in caam_drv_ctx_init()
427 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | in caam_drv_ctx_init()
429 drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); in caam_drv_ctx_init()
430 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); in caam_drv_ctx_init()
431 size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); in caam_drv_ctx_init()
432 hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, in caam_drv_ctx_init()
437 return ERR_PTR(-ENOMEM); in caam_drv_ctx_init()
439 drv_ctx->context_a = hwdesc; in caam_drv_ctx_init()
452 drv_ctx->cpu = *cpu; in caam_drv_ctx_init()
455 drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu); in caam_drv_ctx_init()
458 drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc, in caam_drv_ctx_init()
460 if (IS_ERR(drv_ctx->req_fq)) { in caam_drv_ctx_init()
464 return ERR_PTR(-ENOMEM); in caam_drv_ctx_init()
468 refcount_set(&drv_ctx->refcnt, 1); in caam_drv_ctx_init()
470 drv_ctx->qidev = qidev; in caam_drv_ctx_init()
491 int cleaned = qman_p_poll_dqrr(np->p, budget); in caam_qi_poll()
495 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); in caam_qi_poll()
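
The two caam_qi_poll() matches are essentially the whole NAPI handler: poll the portal's DQRR for up to the budget, and once drained, complete NAPI and re-enable the dequeue-ready interrupt source. A sketch of that handler, assuming the caam_napi layout reconstructed earlier:

static int caam_qi_poll(struct napi_struct *napi, int budget)
{
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

	/* Drain up to 'budget' DQRR entries from the portal that woke us */
	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		/* Portal drained: finish polling and re-arm the DQRR IRQ */
		napi_complete(napi);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}
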
507 if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq)) in caam_drv_ctx_rel()
508 dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n"); in caam_drv_ctx_rel()
510 dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a, in caam_drv_ctx_rel()
511 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr), in caam_drv_ctx_rel()
520 struct device *qidev = data; in caam_qi_shutdown()
527 irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask; in caam_qi_shutdown()
536 qman_delete_cgr_safe(&priv->cgr); in caam_qi_shutdown()
537 qman_release_cgrid(priv->cgr.cgrid); in caam_qi_shutdown()
562 np->p = p; in caam_qi_napi_schedule()
563 napi_schedule(&np->irqtask); in caam_qi_napi_schedule()
577 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev); in caam_rsp_fq_dqrr_cb()
584 fd = &dqrr->fd; in caam_rsp_fq_dqrr_cb()
586 drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); in caam_rsp_fq_dqrr_cb()
593 refcount_dec(&drv_req->drv_ctx->refcnt); in caam_rsp_fq_dqrr_cb()
595 status = be32_to_cpu(fd->status); in caam_rsp_fq_dqrr_cb()
608 dev_err(qidev, "Non-compound FD from CAAM\n"); in caam_rsp_fq_dqrr_cb()
612 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), in caam_rsp_fq_dqrr_cb()
613 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); in caam_rsp_fq_dqrr_cb()
615 drv_req->cbk(drv_req, status); in caam_rsp_fq_dqrr_cb()
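
The caam_rsp_fq_dqrr_cb() matches together describe the response path: under congestion the handler defers to NAPI, otherwise the FD is mapped back to its caam_drv_req, the in-flight reference is dropped, the FD is checked for the compound format, the S/G table is unmapped and the completion callback runs with the CAAM status. A sketch; the sched_napi argument exists only on newer kernels, the caam_qi_napi_schedule() prototype is assumed, and the detailed status decoding done by the driver is elided.

static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr,
						    bool sched_napi)
{
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	u32 status;

	/* Under congestion, stop dequeuing and let NAPI drain the portal */
	if (caam_qi_napi_schedule(p, raw_cpu_ptr(&pcpu_qipriv.caam_napi),
				  sched_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;

	drv_req = caam_iova_to_virt(qipriv.domain, qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev, "Can't find original request for CAAM response\n");
		return qman_cb_dqrr_consume;
	}

	/* A response arrived, so this request is no longer in flight */
	refcount_dec(&drv_req->drv_ctx->refcnt);

	status = be32_to_cpu(fd->status);
	if (unlikely(status))
		dev_err(qidev, "Error: %#x in CAAM response FD\n", status);

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}
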
619 static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu) in alloc_rsp_fq_cpu()
627 return -ENOMEM; in alloc_rsp_fq_cpu()
629 fq->cb.dqrr = caam_rsp_fq_dqrr_cb; in alloc_rsp_fq_cpu()
636 return -ENODEV; in alloc_rsp_fq_cpu()
655 return -ENODEV; in alloc_rsp_fq_cpu()
660 dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu); in alloc_rsp_fq_cpu()
664 static int init_cgr(struct device *qidev) in init_cgr()
696 static int alloc_rsp_fqs(struct device *qidev) in alloc_rsp_fqs()
729 free_netdev(priv->net_dev); in free_caam_qi_pcpu_netdev()
736 struct device *qidev = &caam_pdev->dev; in caam_qi_init()
741 err = -ENOMEM; in caam_qi_init()
767 struct caam_napi *caam_napi = &priv->caam_napi; in caam_qi_init()
768 struct napi_struct *irqtask = &caam_napi->irqtask; in caam_qi_init()
773 err = -ENOMEM; in caam_qi_init()
777 priv->net_dev = net_dev; in caam_qi_init()
778 net_dev->dev = *qidev; in caam_qi_init()
790 err = -ENOMEM; in caam_qi_init()