Lines Matching full:cmdq

226 #define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
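
The match above is only the first line of a multi-line macro (note the trailing backslash): the full definition ORs the per-source field masks of the GERROR register into one "any handled error" mask for the IRQ handler. A minimal, self-contained sketch of that pattern follows; the DEMO_* names and bit positions are hypothetical, not the driver's.

#include <linux/bits.h>

/* Hypothetical per-source masks of a global-error register. */
#define DEMO_GERROR_CMDQ_MASK  BIT(0)   /* command queue write abort */
#define DEMO_GERROR_EVTQ_MASK  BIT(2)   /* event queue write abort   */
#define DEMO_GERROR_MSI_MASK   BIT(7)   /* aborted MSI write         */

/* Aggregate mask: any error source the handler knows how to report. */
#define DEMO_GERROR_ERR_MASK (DEMO_GERROR_CMDQ_MASK | \
			      DEMO_GERROR_EVTQ_MASK | \
			      DEMO_GERROR_MSI_MASK)
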
373 struct ivpu_mmu_queue *q = &mmu->cmdq; in ivpu_mmu_cmdq_alloc()
383 ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n", in ivpu_mmu_cmdq_alloc()
426 ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret); in ivpu_mmu_structs_alloc()
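
Lines 373-426 cover allocation: ivpu_mmu_cmdq_alloc() obtains the queue buffer, logs its DMA address (dma), the derived base-register value (dma_q) and the queue size, and ivpu_mmu_structs_alloc() reports the error if that allocation fails. Below is a minimal sketch of such an allocation, assuming a managed DMA-coherent buffer and a queue descriptor with producer/consumer indices; the struct, the demo_* names and the dma_q encoding are illustrative, not the driver's definitions.

#include <linux/dma-mapping.h>
#include <linux/log2.h>

struct demo_mmu_queue {
	void *base;        /* CPU address of the queue buffer         */
	dma_addr_t dma;    /* bus address handed to the device        */
	u64 dma_q;         /* value programmed into the base register */
	u32 prod;          /* producer index (owned by the driver)    */
	u32 cons;          /* consumer index (advanced by the device) */
};

static int demo_mmu_cmdq_alloc(struct device *dev, struct demo_mmu_queue *q,
			       size_t size, unsigned int entry_count)
{
	q->base = dmam_alloc_coherent(dev, size, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	/* Base registers of this kind commonly encode the buffer address
	 * together with log2 of the entry count; treat this as a guess. */
	q->dma_q = q->dma | ilog2(entry_count);
	q->prod = 0;
	q->cons = 0;
	return 0;
}
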
465 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_wait_for_cons() local
468 ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod, in ivpu_mmu_cmdq_wait_for_cons()
473 cmdq->cons = cmdq->prod; in ivpu_mmu_cmdq_wait_for_cons()
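
ivpu_mmu_cmdq_wait_for_cons() (lines 465-473) polls the hardware CMDQ consumer register until it reports the driver's current producer value, then records that everything up to prod has been consumed. A sketch of the same idea, reusing the demo_mmu_queue descriptor from the allocation sketch above; demo_cmdq_cons_reg is a hypothetical MMIO address and the poll interval and timeout are arbitrary.

#include <linux/iopoll.h>

static int demo_mmu_cmdq_wait_for_cons(void __iomem *demo_cmdq_cons_reg,
					struct demo_mmu_queue *q)
{
	u32 cons;
	int ret;

	/* Poll (with sleeps) until the device's consumer index equals the
	 * producer index, i.e. every queued command has been executed. */
	ret = readl_poll_timeout(demo_cmdq_cons_reg, cons,
				 cons == q->prod, 10, 100000);
	if (ret)
		return ret;               /* -ETIMEDOUT: the queue stalled */

	q->cons = q->prod;                /* remember the new consumer spot */
	return 0;
}
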
492 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_cmd_write() local
493 u64 *queue_buffer = cmdq->base; in ivpu_mmu_cmdq_cmd_write()
494 int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); in ivpu_mmu_cmdq_cmd_write()
496 if (ivpu_mmu_queue_is_full(cmdq)) { in ivpu_mmu_cmdq_cmd_write()
503 cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_cmdq_cmd_write()
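
ivpu_mmu_cmdq_cmd_write() (lines 492-503) shows the ring arithmetic: the producer index carries an extra wrap bit on top of the slot index, IVPU_MMU_Q_IDX() strips that bit to address the u64 command buffer, the queue is treated as full once the producer has lapped the consumer (same slot, opposite wrap bits in the usual SMMU-style scheme), and the increment is masked so the wrap bit toggles automatically. A self-contained sketch of that scheme with assumed sizes; the real driver defines its own constants and command layout.

#include <linux/types.h>

#define DEMO_Q_COUNT      128U                  /* entries in the ring  */
#define DEMO_Q_IDX_MASK   (DEMO_Q_COUNT - 1)    /* slot-index bits      */
#define DEMO_Q_WRAP_BIT   DEMO_Q_COUNT          /* extra lap-parity bit */
#define DEMO_Q_WRAP_MASK  (DEMO_Q_WRAP_BIT | DEMO_Q_IDX_MASK)
#define DEMO_Q_IDX(val)   ((val) & DEMO_Q_IDX_MASK)
#define DEMO_CMD_DWORDS   2U                    /* 16-byte commands     */

static bool demo_queue_is_full(const struct demo_mmu_queue *q)
{
	/* Same slot but opposite wrap bits: the producer has lapped the
	 * consumer exactly once, so no free slot remains. */
	return DEMO_Q_IDX(q->prod) == DEMO_Q_IDX(q->cons) &&
	       (q->prod & DEMO_Q_WRAP_BIT) != (q->cons & DEMO_Q_WRAP_BIT);
}

static int demo_cmdq_cmd_write(struct demo_mmu_queue *q, u64 cmd0, u64 cmd1)
{
	u64 *queue_buffer = q->base;
	unsigned int idx = DEMO_Q_IDX(q->prod) * DEMO_CMD_DWORDS;

	if (demo_queue_is_full(q))
		return -EBUSY;

	queue_buffer[idx] = cmd0;
	queue_buffer[idx + 1] = cmd1;
	/* Masked increment: stepping past the last slot flips the wrap bit
	 * and resets the slot index to zero in one operation. */
	q->prod = (q->prod + 1) & DEMO_Q_WRAP_MASK;
	return 0;
}
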
512 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_sync()
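
ivpu_mmu_cmdq_sync() (line 512 onwards) ties the pieces together: it queues a final synchronisation command, flushes the queue memory out of the CPU caches, publishes the new producer index to the hardware, and waits for the consumer to catch up. A sketch of that sequence, building on the helpers above; DEMO_CMD_SYNC, the register pointers and the doorbell protocol are assumptions of this sketch.

#include <linux/io.h>
#include <asm/cacheflush.h>

#define DEMO_CMD_SYNC  0x46ULL   /* hypothetical opcode of a sync command */

static int demo_mmu_cmdq_sync(struct demo_mmu_queue *q,
			      void __iomem *demo_cmdq_prod_reg,
			      void __iomem *demo_cmdq_cons_reg,
			      size_t queue_size)
{
	int ret;

	ret = demo_cmdq_cmd_write(q, DEMO_CMD_SYNC, 0);
	if (ret)
		return ret;

	/* The queue lives in ordinary cached memory; flush it so a device
	 * that does not snoop CPU caches sees the freshly written command. */
	clflush_cache_range(q->base, queue_size);

	writel(q->prod, demo_cmdq_prod_reg);   /* doorbell: new producer index */

	return demo_mmu_cmdq_wait_for_cons(demo_cmdq_cons_reg, q);
}
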
570 memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
572 clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
573 mmu->cmdq.prod = 0; in ivpu_mmu_reset()
574 mmu->cmdq.cons = 0; in ivpu_mmu_reset()
595 REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q); in ivpu_mmu_reset()
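
In ivpu_mmu_reset() (lines 570-595) the queue is rewound rather than reallocated: the buffer is zeroed and flushed, both indices go back to zero, and the previously computed dma_q value is written into the 64-bit CMDQ base register. A short sketch of the same steps on the demo descriptor; demo_cmdq_base_reg is again a hypothetical MMIO address.

#include <linux/string.h>

static void demo_mmu_cmdq_reset(struct demo_mmu_queue *q,
				void __iomem *demo_cmdq_base_reg,
				size_t queue_size)
{
	memset(q->base, 0, queue_size);            /* drop any stale commands */
	clflush_cache_range(q->base, queue_size);  /* make the zeroes visible */
	q->prod = 0;                               /* both sides start over   */
	q->cons = 0;                               /* on an empty ring        */

	/* The buffer itself is kept, so the precomputed base value is
	 * simply written back once the device-side state has been cleared. */
	writeq(q->dma_q, demo_cmdq_base_reg);
}
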
928 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n"); in ivpu_mmu_irq_gerr_handler()
936 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active)) in ivpu_mmu_irq_gerr_handler()
937 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n"); in ivpu_mmu_irq_gerr_handler()
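
The last matches are in the global-error interrupt handler: an aborted MSI write for the CMDQ is reported with a rate-limited warning, while a write abort on the command queue itself is a rate-limited error. The usual shape of such a handler is to read GERROR, work out which error bits newly toggled relative to the acknowledge register, log each active source, and then acknowledge them. A sketch of that pattern, reusing the hypothetical DEMO_GERROR_* masks from the first sketch; the XOR-against-GERRORN acknowledge protocol is an assumption here, not quoted from the driver.

#include <linux/device.h>
#include <linux/io.h>

static void demo_mmu_gerror_handler(struct device *dev,
				    void __iomem *gerror_reg,
				    void __iomem *gerrorn_reg)
{
	u32 gerror = readl(gerror_reg);
	u32 gerrorn = readl(gerrorn_reg);
	u32 active = gerror ^ gerrorn;   /* bits that toggled = new errors */

	if (!(active & DEMO_GERROR_ERR_MASK))
		return;

	if (active & DEMO_GERROR_MSI_MASK)
		dev_warn_ratelimited(dev, "MMU CMDQ MSI write aborted\n");

	if (active & DEMO_GERROR_CMDQ_MASK)
		dev_err_ratelimited(dev, "MMU CMDQ write aborted\n");

	/* Ack by mirroring GERROR into GERRORN so future toggles stand out. */
	writel(gerror, gerrorn_reg);
}
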