
Searched full:cmdq (Results 1 – 25 of 142) sorted by relevance


/linux-6.14.4/drivers/infiniband/hw/erdma/
erdma_cmdq.c
9 static void arm_cmdq_cq(struct erdma_cmdq *cmdq) in arm_cmdq_cq() argument
11 struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq); in arm_cmdq_cq()
12 u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) | in arm_cmdq_cq()
14 FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) | in arm_cmdq_cq()
15 FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn); in arm_cmdq_cq()
17 *cmdq->cq.dbrec = db_data; in arm_cmdq_cq()
20 atomic64_inc(&cmdq->cq.armed_num); in arm_cmdq_cq()
23 static void kick_cmdq_db(struct erdma_cmdq *cmdq) in kick_cmdq_db() argument
25 struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq); in kick_cmdq_db()
26 u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi); in kick_cmdq_db()
[all …]
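The erdma snippet above assembles a 64-bit doorbell word by packing the CQ consumer index and command sequence number into named bit fields with FIELD_PREP(), then writes the result to the doorbell record (*cmdq->cq.dbrec) in a single store. A minimal userspace sketch of that packing pattern; the mask values are invented for illustration and are not the real ERDMA_CQDB_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Shift a value into the bits selected by a constant mask, like the
 * kernel's FIELD_PREP().  Fine for constant masks, as used here. */
#define FIELD_PREP(mask, val) \
        (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define CQDB_CI_MASK    0x00000000ffffffffULL   /* hypothetical layout */
#define CQDB_CMDSN_MASK 0x0000000300000000ULL   /* hypothetical layout */

int main(void)
{
        uint64_t db_data = FIELD_PREP(CQDB_CI_MASK, 42) |
                           FIELD_PREP(CQDB_CMDSN_MASK, 3);

        printf("doorbell word = %#llx\n", (unsigned long long)db_data);
        return 0;
}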
erdma.h
198 /* cmdq and aeq use the same msix vector */
200 struct erdma_cmdq cmdq; member
267 int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
269 void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
/linux-6.14.4/drivers/mailbox/
mtk-cmdq-mailbox.c
18 #include <linux/mailbox/mtk-cmdq-mailbox.h>
69 struct cmdq *cmdq; member
76 struct cmdq { struct
95 static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable) in cmdq_sw_ddr_enable() argument
97 WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); in cmdq_sw_ddr_enable()
100 writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE); in cmdq_sw_ddr_enable()
102 writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE); in cmdq_sw_ddr_enable()
104 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); in cmdq_sw_ddr_enable()
109 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); in cmdq_get_shift_pa() local
111 return cmdq->pdata->shift; in cmdq_get_shift_pa()
[all …]
/linux-6.14.4/drivers/gpu/drm/nouveau/nvkm/falcon/
cmdq.c
26 nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind) in nvkm_falcon_cmdq_has_room() argument
28 u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg); in nvkm_falcon_cmdq_has_room()
29 u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg); in nvkm_falcon_cmdq_has_room()
35 free = cmdq->offset + cmdq->size - head; in nvkm_falcon_cmdq_has_room()
40 head = cmdq->offset; in nvkm_falcon_cmdq_has_room()
51 nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size) in nvkm_falcon_cmdq_push() argument
53 struct nvkm_falcon *falcon = cmdq->qmgr->falcon; in nvkm_falcon_cmdq_push()
54 nvkm_falcon_pio_wr(falcon, data, 0, 0, DMEM, cmdq->position, size, 0, false); in nvkm_falcon_cmdq_push()
55 cmdq->position += ALIGN(size, QUEUE_ALIGNMENT); in nvkm_falcon_cmdq_push()
59 nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq) in nvkm_falcon_cmdq_rewind() argument
[all …]
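nvkm_falcon_cmdq_has_room() above is a classic ring-buffer capacity check: free space is measured from the head pointer to the end of the queue, and if that tail segment is too small the caller must rewind to the queue start. A simplified standalone sketch of the logic, inferred from the excerpt rather than copied from Nouveau:

#include <stdbool.h>
#include <stdint.h>

static bool
ring_has_room(uint32_t head, uint32_t tail, uint32_t offset,
              uint32_t qsize, uint32_t need, bool *rewind)
{
        uint32_t free;

        *rewind = false;
        if (head >= tail) {
                /* Free space runs from head up to the end of the queue. */
                free = offset + qsize - head;
                if (free < need) {
                        /* Tail segment too small: wrap to the start. */
                        *rewind = true;
                        head = offset;
                        free = tail - head;
                }
        } else {
                free = tail - head;
        }
        /* Keep one slot unused so head == tail still means "empty". */
        return free > need;
}

nvkm_falcon_cmdq_push() then advances cmdq->position by ALIGN(size, QUEUE_ALIGNMENT) after each write, as shown in the excerpt.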
/linux-6.14.4/drivers/net/ethernet/brocade/bna/
bfa_msgq.c
31 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
32 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
43 bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
44 bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
45 bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
46 bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
50 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq) in cmdq_sm_stopped_entry() argument
54 cmdq->producer_index = 0; in cmdq_sm_stopped_entry()
55 cmdq->consumer_index = 0; in cmdq_sm_stopped_entry()
56 cmdq->flags = 0; in cmdq_sm_stopped_entry()
[all …]
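The Brocade msgq driver builds its cmdq state machine from bfa_fsm_state_decl() macros, one entry/handler pair per state (stopped, init_wait, ready, dbell_wait). A condensed sketch of that function-pointer FSM style, with an invented event set and the entry action folded into the handler:

#include <stdint.h>

enum cmdq_event { CMDQ_E_START, CMDQ_E_STOP };  /* invented events */

struct toy_cmdq;
typedef void (*cmdq_state_t)(struct toy_cmdq *, enum cmdq_event);

struct toy_cmdq {
        cmdq_state_t fsm;               /* current state handler */
        uint32_t producer_index;
        uint32_t consumer_index;
};

static void cmdq_sm_stopped(struct toy_cmdq *q, enum cmdq_event e);
static void cmdq_sm_ready(struct toy_cmdq *q, enum cmdq_event e);

/* Resets the indices, as cmdq_sm_stopped_entry() above does when the
 * state is entered. */
static void cmdq_sm_stopped(struct toy_cmdq *q, enum cmdq_event e)
{
        q->producer_index = 0;
        q->consumer_index = 0;
        if (e == CMDQ_E_START)
                q->fsm = cmdq_sm_ready;
}

static void cmdq_sm_ready(struct toy_cmdq *q, enum cmdq_event e)
{
        if (e == CMDQ_E_STOP)
                q->fsm = cmdq_sm_stopped;
}

Events are dispatched by calling q->fsm(q, event); each transition simply swaps the function pointer.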
/linux-6.14.4/drivers/crypto/cavium/nitrox/
nitrox_lib.c
25 static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) in nitrox_cmdq_init() argument
27 struct nitrox_device *ndev = cmdq->ndev; in nitrox_cmdq_init()
29 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; in nitrox_cmdq_init()
30 cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, in nitrox_cmdq_init()
31 &cmdq->unalign_dma, in nitrox_cmdq_init()
33 if (!cmdq->unalign_base) in nitrox_cmdq_init()
36 cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes); in nitrox_cmdq_init()
37 cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma); in nitrox_cmdq_init()
38 cmdq->write_idx = 0; in nitrox_cmdq_init()
40 spin_lock_init(&cmdq->cmd_qlock); in nitrox_cmdq_init()
[all …]
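nitrox_cmdq_init() above obtains an aligned command queue by over-allocating (alignment added to the size), rounding the DMA address up, and offsetting the CPU pointer by the same amount. The same pointer arithmetic in plain userspace C, with malloc() standing in for dma_alloc_coherent():

#include <stdint.h>
#include <stdlib.h>

/* Round a pointer up to a power-of-two boundary, like the kernel's
 * PTR_ALIGN(). */
#define PTR_ALIGN(p, a) \
        ((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

struct toy_cmdq {
        void *unalign_base;     /* what was actually allocated */
        void *base;             /* aligned start handed to hardware */
        size_t qsize;
};

static int toy_cmdq_init(struct toy_cmdq *cmdq, size_t qlen,
                         size_t instr_size, size_t align_bytes)
{
        cmdq->qsize = qlen * instr_size + align_bytes;
        cmdq->unalign_base = malloc(cmdq->qsize);
        if (!cmdq->unalign_base)
                return -1;

        cmdq->base = PTR_ALIGN(cmdq->unalign_base, align_bytes);
        return 0;
}

Freeing must use unalign_base rather than base, which is why the driver keeps both pointers (and the matching unalign_dma handle).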
nitrox_reqmgr.c
230 struct nitrox_cmdq *cmdq) in backlog_list_add() argument
234 spin_lock_bh(&cmdq->backlog_qlock); in backlog_list_add()
235 list_add_tail(&sr->backlog, &cmdq->backlog_head); in backlog_list_add()
236 atomic_inc(&cmdq->backlog_count); in backlog_list_add()
238 spin_unlock_bh(&cmdq->backlog_qlock); in backlog_list_add()
242 struct nitrox_cmdq *cmdq) in response_list_add() argument
246 spin_lock_bh(&cmdq->resp_qlock); in response_list_add()
247 list_add_tail(&sr->response, &cmdq->response_head); in response_list_add()
248 spin_unlock_bh(&cmdq->resp_qlock); in response_list_add()
252 struct nitrox_cmdq *cmdq) in response_list_del() argument
[all …]
/linux-6.14.4/drivers/accel/ivpu/
ivpu_job.c
27 static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) in ivpu_cmdq_ring_db() argument
29 ivpu_hw_db_set(vdev, cmdq->db_id); in ivpu_cmdq_ring_db()
33 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) in ivpu_preemption_buffers_create() argument
42 cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user, in ivpu_preemption_buffers_create()
44 if (!cmdq->primary_preempt_buf) { in ivpu_preemption_buffers_create()
49 cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma, in ivpu_preemption_buffers_create()
51 if (!cmdq->secondary_preempt_buf) { in ivpu_preemption_buffers_create()
59 ivpu_bo_free(cmdq->primary_preempt_buf); in ivpu_preemption_buffers_create()
60 cmdq->primary_preempt_buf = NULL; in ivpu_preemption_buffers_create()
65 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) in ivpu_preemption_buffers_free() argument
[all …]
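ivpu_preemption_buffers_create() above allocates a primary and a secondary preemption buffer and, if the second allocation fails, frees the first and clears its pointer. The unwind shape reduced to plain C, with malloc() standing in for ivpu_bo_create():

#include <stdlib.h>

struct toy_cmdq {
        void *primary_preempt_buf;
        void *secondary_preempt_buf;
};

static int toy_preempt_bufs_create(struct toy_cmdq *cmdq,
                                   size_t primary_size, size_t secondary_size)
{
        cmdq->primary_preempt_buf = malloc(primary_size);
        if (!cmdq->primary_preempt_buf)
                return -1;

        cmdq->secondary_preempt_buf = malloc(secondary_size);
        if (!cmdq->secondary_preempt_buf) {
                /* Unwind the first allocation and leave no dangling
                 * pointer behind, as the driver does. */
                free(cmdq->primary_preempt_buf);
                cmdq->primary_preempt_buf = NULL;
                return -1;
        }
        return 0;
}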
ivpu_mmu.c
226 #define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
373 struct ivpu_mmu_queue *q = &mmu->cmdq; in ivpu_mmu_cmdq_alloc()
383 ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n", in ivpu_mmu_cmdq_alloc()
426 ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret); in ivpu_mmu_structs_alloc()
465 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_wait_for_cons() local
468 ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod, in ivpu_mmu_cmdq_wait_for_cons()
473 cmdq->cons = cmdq->prod; in ivpu_mmu_cmdq_wait_for_cons()
492 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_cmd_write() local
493 u64 *queue_buffer = cmdq->base; in ivpu_mmu_cmdq_cmd_write()
494 int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); in ivpu_mmu_cmdq_cmd_write()
[all …]
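ivpu_mmu_cmdq_wait_for_cons() above polls the CMDQ consumer register until it matches the locally tracked producer index, then records cons = prod. A userspace sketch of that poll-with-timeout shape, with a volatile load standing in for the REGV_POLL_FLD() register read:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool poll_for_cons(volatile uint32_t *cons_reg, uint32_t prod,
                          unsigned int timeout_ms)
{
        struct timespec ts = { .tv_nsec = 1000 * 1000 };    /* 1 ms */

        while (timeout_ms--) {
                if (*cons_reg == prod)
                        return true;    /* device consumed everything */
                nanosleep(&ts, NULL);
        }
        return false;                   /* device stalled or too slow */
}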
ivpu_mmu.h
32 struct mutex lock; /* Protects cdtab, strtab, cmdq, on */
35 struct ivpu_mmu_queue cmdq; member
/linux-6.14.4/drivers/net/ethernet/huawei/hinic/
hinic_hw_cmdq.c
78 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ argument
79 struct hinic_cmdqs, cmdq[0])
320 static void cmdq_set_db(struct hinic_cmdq *cmdq, in cmdq_set_db() argument
332 writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); in cmdq_set_db()
335 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, in cmdq_sync_cmd_direct_resp() argument
343 struct hinic_wq *wq = cmdq->wq; in cmdq_sync_cmd_direct_resp()
348 spin_lock_bh(&cmdq->cmdq_lock); in cmdq_sync_cmd_direct_resp()
353 spin_unlock_bh(&cmdq->cmdq_lock); in cmdq_sync_cmd_direct_resp()
359 wrapped = cmdq->wrapped; in cmdq_sync_cmd_direct_resp()
364 cmdq->wrapped = !cmdq->wrapped; in cmdq_sync_cmd_direct_resp()
[all …]
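cmdq_sync_cmd_direct_resp() above snapshots cmdq->wrapped and toggles it, which in ring drivers of this shape happens when the producer index passes the end of the work queue; the bit typically serves as an owner marker so hardware can tell fresh entries from stale ones. A sketch of the toggle, with invented types:

#include <stdbool.h>
#include <stdint.h>

struct toy_wq {
        uint32_t prod_idx;
        uint32_t q_depth;
        bool wrapped;           /* owner bit for the current lap */
};

/* Advance the producer index, flipping the owner bit on wrap-around. */
static uint32_t toy_wq_post(struct toy_wq *wq, uint32_t nr_wqebbs)
{
        uint32_t pi = wq->prod_idx;

        wq->prod_idx += nr_wqebbs;
        if (wq->prod_idx >= wq->q_depth) {
                wq->prod_idx -= wq->q_depth;
                wq->wrapped = !wq->wrapped;
        }
        return pi;
}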
hinic_hw_io.c
119 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); in write_sq_ctxts()
163 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); in write_rq_ctxts()
220 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); in hinic_clean_queue_offload_ctxt()
533 enum hinic_cmdq_type cmdq, type; in hinic_io_init() local
565 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { in hinic_io_init()
568 dev_err(&pdev->dev, "Failed to get cmdq db area\n"); in hinic_io_init()
573 func_to_io->cmdq_db_area[cmdq] = db_area; in hinic_io_init()
600 for (type = HINIC_CMDQ_SYNC; type < cmdq; type++) in hinic_io_init()
619 enum hinic_cmdq_type cmdq; in hinic_io_free() local
628 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) in hinic_io_free()
[all …]
/linux-6.14.4/include/linux/soc/mediatek/
mtk-cmdq.h
11 #include <linux/mailbox/mtk-cmdq-mailbox.h>
18 * Every cmdq thread has its own SPRs (Specific Purpose Registers),
67 * cmdq_dev_get_client_reg() - parse cmdq client reg from the device
68 * node of CMDQ client
69 * @dev: device of CMDQ mailbox client
70 * @client_reg: CMDQ client reg pointer
75 * Help CMDQ client parsing the cmdq client reg
76 * from the device node of CMDQ client.
82 * cmdq_mbox_create() - create CMDQ mailbox client and channel
83 * @dev: device of CMDQ mailbox client
[all …]
mtk-mmsys.h
10 #include <linux/mailbox/mtk-cmdq-mailbox.h>
11 #include <linux/soc/mediatek/mtk-cmdq.h>
/linux-6.14.4/drivers/infiniband/hw/bnxt_re/
qplib_rcfw.c
114 struct bnxt_qplib_cmdq_ctx *cmdq; in bnxt_re_is_fw_stalled() local
118 cmdq = &rcfw->cmdq; in bnxt_re_is_fw_stalled()
120 if (time_after(jiffies, cmdq->last_seen + in bnxt_re_is_fw_stalled()
123 "%s: FW STALL Detected. cmdq[%#x]=%#x waited (%d > %d) msec active %d ", in bnxt_re_is_fw_stalled()
125 jiffies_to_msecs(jiffies - cmdq->last_seen), in bnxt_re_is_fw_stalled()
147 struct bnxt_qplib_cmdq_ctx *cmdq; in __wait_for_resp() local
151 cmdq = &rcfw->cmdq; in __wait_for_resp()
155 if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags)) in __wait_for_resp()
157 if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags)) in __wait_for_resp()
160 wait_event_timeout(cmdq->waitq, in __wait_for_resp()
[all …]
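bnxt_re_is_fw_stalled() above declares the firmware stalled when the command queue has seen no activity past a deadline (time_after() against cmdq->last_seen plus a threshold). The same "last seen" check in userspace terms, with an invented threshold and CLOCK_MONOTONIC standing in for jiffies:

#include <stdbool.h>
#include <time.h>

#define STALL_THRESHOLD_SEC 10          /* invented for illustration */

static bool fw_stalled(time_t last_seen)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec - last_seen > STALL_THRESHOLD_SEC;
}

A monotonic clock matters here for the same reason the kernel uses jiffies: wall-clock adjustments must not fake or hide a stall.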
/linux-6.14.4/drivers/net/ethernet/hisilicon/hns3/hns3_common/
hclge_comm_cmd.c
492 /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, in hclge_comm_cmd_send()
541 struct hclge_comm_cmq *cmdq = &hw->cmq; in hclge_comm_cmd_uninit() local
550 spin_lock_bh(&cmdq->csq.lock); in hclge_comm_cmd_uninit()
551 spin_lock(&cmdq->crq.lock); in hclge_comm_cmd_uninit()
553 spin_unlock(&cmdq->crq.lock); in hclge_comm_cmd_uninit()
554 spin_unlock_bh(&cmdq->csq.lock); in hclge_comm_cmd_uninit()
556 hclge_comm_free_cmd_desc(&cmdq->csq); in hclge_comm_cmd_uninit()
557 hclge_comm_free_cmd_desc(&cmdq->crq); in hclge_comm_cmd_uninit()
563 struct hclge_comm_cmq *cmdq = &hw->cmq; in hclge_comm_cmd_queue_init() local
567 spin_lock_init(&cmdq->csq.lock); in hclge_comm_cmd_queue_init()
[all …]
/linux-6.14.4/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
374 struct arm_smmu_cmdq *cmdq = NULL; in arm_smmu_get_cmdq() local
377 cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent); in arm_smmu_get_cmdq()
379 return cmdq ?: &smmu->cmdq; in arm_smmu_get_cmdq()
383 struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_needs_busy_polling() argument
385 if (cmdq == &smmu->cmdq) in arm_smmu_cmdq_needs_busy_polling()
392 struct arm_smmu_cmdq *cmdq, u32 prod) in arm_smmu_cmdq_build_sync_cmd() argument
394 struct arm_smmu_queue *q = &cmdq->q; in arm_smmu_cmdq_build_sync_cmd()
409 if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) in arm_smmu_cmdq_build_sync_cmd()
414 struct arm_smmu_cmdq *cmdq) in __arm_smmu_cmdq_skip_err() argument
422 struct arm_smmu_queue *q = &cmdq->q; in __arm_smmu_cmdq_skip_err()
[all …]
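arm_smmu_get_cmdq() above asks the implementation for a secondary command queue and falls back to the main smmu->cmdq when none is offered (the `cmdq ?: &smmu->cmdq` GCC extension). The selection pattern with invented types, the extension spelled out:

struct toy_cmdq { int id; };

struct toy_smmu {
        struct toy_cmdq cmdq;   /* default, always-present queue */
        struct toy_cmdq *(*get_secondary_cmdq)(struct toy_smmu *smmu);
};

static struct toy_cmdq *toy_get_cmdq(struct toy_smmu *smmu)
{
        struct toy_cmdq *cmdq = NULL;

        if (smmu->get_secondary_cmdq)
                cmdq = smmu->get_secondary_cmdq(smmu);

        return cmdq ? cmdq : &smmu->cmdq;
}

The tegra241-cmdqv entry below is exactly such an implementation: it spreads issuing traffic across queues with their own locks and still routes unsupported commands back to the smmu->cmdq pathway.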
tegra241-cmdqv.c
108 "This allows to disable CMDQV HW and use default SMMU internal CMDQ.");
122 * @cmdq: Command Queue struct
134 struct arm_smmu_cmdq cmdq; member
162 * struct tegra241_cmdqv - CMDQ-V for SMMUv3
267 __arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq); in tegra241_vintf0_handle_error()
330 /* Use SMMU CMDQ if VINTF0 is uninitialized */ in tegra241_cmdqv_get_cmdq()
336 * balance out traffic on cmdq issuing: each cmdq has its own in tegra241_cmdqv_get_cmdq()
337 * lock, if all cpus issue cmdlist using the same cmdq, only in tegra241_cmdqv_get_cmdq()
346 /* Unsupported CMD goes for smmu->cmdq pathway */ in tegra241_cmdqv_get_cmdq()
347 if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent)) in tegra241_cmdqv_get_cmdq()
[all …]
/linux-6.14.4/drivers/gpu/drm/nouveau/nvkm/engine/sec2/
base.c
45 struct nvkm_falcon_cmdq *cmdq = sec2->cmdq; in nvkm_sec2_fini() local
56 ret = nvkm_falcon_cmdq_send(cmdq, &cmd, nvkm_sec2_finimsg, sec2, in nvkm_sec2_fini()
68 nvkm_falcon_cmdq_fini(cmdq); in nvkm_sec2_fini()
119 nvkm_falcon_cmdq_del(&sec2->cmdq); in nvkm_sec2_dtor()
159 (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) || in nvkm_sec2_new_()
/linux-6.14.4/drivers/net/ethernet/chelsio/cxgb/
sge.c
168 struct cmdQ { struct
182 spinlock_t lock; /* Lock to protect cmdQ enqueuing */ argument
208 /* Bit flags for cmdQ.status */
265 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; member
474 struct cmdQ *q = &sge->cmdQ[0]; in sched_skb()
612 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) in free_cmdQ_buffers()
653 struct cmdQ *q = &sge->cmdQ[i]; in free_tx_resources()
677 struct cmdQ *q = &sge->cmdQ[i]; in alloc_tx_resources()
706 sge->cmdQ[0].stop_thres = sge->adapter->params.nports * in alloc_tx_resources()
750 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, in configure_sge()
[all …]
/linux-6.14.4/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/
r535.c
175 cmd->sequence = gsp->cmdq.seq++; in r535_gsp_cmdq_push()
183 wptr = *gsp->cmdq.wptr; in r535_gsp_cmdq_push()
186 free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; in r535_gsp_cmdq_push()
187 if (free >= gsp->cmdq.cnt) in r535_gsp_cmdq_push()
188 free -= gsp->cmdq.cnt; in r535_gsp_cmdq_push()
200 cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); in r535_gsp_cmdq_push()
201 step = min_t(u32, free, (gsp->cmdq.cnt - wptr)); in r535_gsp_cmdq_push()
207 if (wptr == gsp->cmdq.cnt) in r535_gsp_cmdq_push()
214 nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); in r535_gsp_cmdq_push()
216 (*gsp->cmdq.wptr) = wptr; in r535_gsp_cmdq_push()
[all …]
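r535_gsp_cmdq_push() above computes free space in the GSP command ring from a shared write pointer and read pointer, with one slot deliberately kept empty. The arithmetic in isolation, as a sketch rather than the Nouveau code:

#include <stdint.h>

static uint32_t ring_free(uint32_t rptr, uint32_t wptr, uint32_t cnt)
{
        /* One slot stays unused so wptr == rptr always means "empty". */
        uint32_t free = rptr + cnt - wptr - 1;

        if (free >= cnt)
                free -= cnt;
        return free;
}

With cnt = 8, rptr = 2, wptr = 5 this yields 4 free slots: 8 total, minus 3 occupied, minus the reserved empty slot.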
/linux-6.14.4/include/dt-bindings/gce/
mt8186-gce.h
82 /* CMDQ: debug */
85 /* CMDQ: P7: debug */
348 /* CMDQ sw tokens
367 /* Notify normal CMDQ there are some secure task done
372 /* CMDQ use sw token */
419 /* CMDQ sw tokens END */
/linux-6.14.4/drivers/soc/mediatek/
Kconfig
9 tristate "MediaTek CMDQ Support"
15 Say yes here to add support for the MediaTek Command Queue (CMDQ)
16 driver. The CMDQ is used to help read/write registers with critical
/linux-6.14.4/Documentation/devicetree/bindings/mailbox/
mediatek,gce-props.yaml
15 (CMDQ) mailbox driver is a driver for GCE, implemented using the Linux
18 We use mediatek,gce-mailbox.yaml to define the properties for CMDQ mailbox
19 driver. A device driver that uses the CMDQ driver to configure its hardware
/linux-6.14.4/drivers/gpu/drm/mediatek/
mtk_padding.c
12 #include <linux/soc/mediatek/mtk-cmdq.h>
30 * @cmdq_reg: CMDQ setting of the Padding
33 * CMDQ settings, we stored these differences all together.
