Lines Matching +full:cmdq +full:- +full:sync (matches from the Intel VPU accel driver's MMU code, ivpu_mmu.c; each line shows its source line number and enclosing function)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-2024 Intel Corporation
76 #define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
226 #define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
253 return "Transaction marks non-substream disabled"; in ivpu_mmu_event_to_str()
295 return "Sync failed to complete ATS invalidation"; in ivpu_mmu_cmdq_err_to_str()
337 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cdtab_alloc()
338 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cdtab_alloc()
341 cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL); in ivpu_mmu_cdtab_alloc()
342 if (!cdtab->base) in ivpu_mmu_cdtab_alloc()
343 return -ENOMEM; in ivpu_mmu_cdtab_alloc()
345 ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size); in ivpu_mmu_cdtab_alloc()
352 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_alloc()
353 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_alloc()
356 strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL); in ivpu_mmu_strtab_alloc()
357 if (!strtab->base) in ivpu_mmu_strtab_alloc()
358 return -ENOMEM; in ivpu_mmu_strtab_alloc()
360 strtab->base_cfg = IVPU_MMU_STRTAB_CFG; in ivpu_mmu_strtab_alloc()
361 strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA; in ivpu_mmu_strtab_alloc()
362 strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK; in ivpu_mmu_strtab_alloc()
365 &strtab->dma, &strtab->dma_q, size); in ivpu_mmu_strtab_alloc()
372 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cmdq_alloc()
373 struct ivpu_mmu_queue *q = &mmu->cmdq; in ivpu_mmu_cmdq_alloc()
375 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL); in ivpu_mmu_cmdq_alloc()
376 if (!q->base) in ivpu_mmu_cmdq_alloc()
377 return -ENOMEM; in ivpu_mmu_cmdq_alloc()
379 q->dma_q = IVPU_MMU_Q_BASE_RWA; in ivpu_mmu_cmdq_alloc()
380 q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; in ivpu_mmu_cmdq_alloc()
381 q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; in ivpu_mmu_cmdq_alloc()
383 ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n", in ivpu_mmu_cmdq_alloc()
384 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_cmdq_alloc()
391 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_evtq_alloc()
392 struct ivpu_mmu_queue *q = &mmu->evtq; in ivpu_mmu_evtq_alloc()
394 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL); in ivpu_mmu_evtq_alloc()
395 if (!q->base) in ivpu_mmu_evtq_alloc()
396 return -ENOMEM; in ivpu_mmu_evtq_alloc()
398 q->dma_q = IVPU_MMU_Q_BASE_RWA; in ivpu_mmu_evtq_alloc()
399 q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; in ivpu_mmu_evtq_alloc()
400 q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; in ivpu_mmu_evtq_alloc()
403 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE); in ivpu_mmu_evtq_alloc()
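Both queue allocations above encode the base register value the same way: a read/write-allocate hint, the DMA address masked down to the architected base-address field, and the log2 queue size in the low bits. A minimal standalone sketch of that encoding follows; the concrete field values and constant names below are illustrative assumptions, not values taken from this excerpt.

#include <stdint.h>
#include <stdio.h>

/* Assumed illustrative values; the real IVPU_MMU_* constants live in the driver headers. */
#define Q_BASE_RWA       (1ULL << 62)            /* assumed read/write-allocate hint bit */
#define Q_BASE_ADDR_MASK 0x0000fffffffffc00ULL   /* assumed base-address field mask */
#define Q_COUNT_LOG2     4                       /* assumed 16-entry queue */

int main(void)
{
	uint64_t dma   = 0x12345678abcd0000ULL;  /* stand-in for the DMA address from the allocator */
	uint64_t dma_q = Q_BASE_RWA | (dma & Q_BASE_ADDR_MASK) | Q_COUNT_LOG2;

	printf("queue base register value: 0x%016llx\n", (unsigned long long)dma_q);
	return 0;
}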
426 ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret); in ivpu_mmu_structs_alloc()
465 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_wait_for_cons() local
468 ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod, in ivpu_mmu_cmdq_wait_for_cons()
473 cmdq->cons = cmdq->prod; in ivpu_mmu_cmdq_wait_for_cons()
480 return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && in ivpu_mmu_queue_is_full()
481 (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons))); in ivpu_mmu_queue_is_full()
486 return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && in ivpu_mmu_queue_is_empty()
487 (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons))); in ivpu_mmu_queue_is_empty()
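The full/empty tests above use the usual SMMU-style ring scheme: prod and cons each carry a slot index plus one extra wrap bit, so the queue is empty when index and wrap bit both match, and full when the indexes match but the wrap bits differ. A standalone sketch of that arithmetic, with the queue size assumed (16 entries) rather than taken from this excerpt:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_COUNT_LOG2 4                        /* assumed: 16-entry queue */
#define Q_COUNT      (1U << Q_COUNT_LOG2)
#define Q_WRAP_MASK  ((Q_COUNT << 1) - 1)     /* index bits plus one wrap bit */
#define Q_IDX(val)   ((val) & (Q_COUNT - 1))  /* slot index within the ring */
#define Q_WRP(val)   ((val) & Q_COUNT)        /* the extra wrap bit */

static bool queue_is_full(uint32_t prod, uint32_t cons)
{
	return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons);
}

static bool queue_is_empty(uint32_t prod, uint32_t cons)
{
	return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) == Q_WRP(cons);
}

int main(void)
{
	uint32_t prod = 0, cons = 0;

	printf("empty at start: %d\n", queue_is_empty(prod, cons));   /* prints 1 */

	for (int i = 0; i < Q_COUNT; i++)                             /* produce a full ring */
		prod = (prod + 1) & Q_WRAP_MASK;

	printf("full after %u writes: %d\n", Q_COUNT, queue_is_full(prod, cons)); /* prints 1 */
	return 0;
}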
492 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_cmd_write() local
493 u64 *queue_buffer = cmdq->base; in ivpu_mmu_cmdq_cmd_write()
494 int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); in ivpu_mmu_cmdq_cmd_write()
496 if (ivpu_mmu_queue_is_full(cmdq)) { in ivpu_mmu_cmdq_cmd_write()
498 return -EBUSY; in ivpu_mmu_cmdq_cmd_write()
503 cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_cmdq_cmd_write()
512 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_sync()
518 ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0); in ivpu_mmu_cmdq_sync()
523 clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_cmdq_sync()
524 REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod); in ivpu_mmu_cmdq_sync()
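Taken together, the cmdq lines above describe a produce/doorbell/wait cycle: append a SYNC command at the producer slot, flush the queue memory so the device sees it, publish the new producer index, then poll the consumer register until it catches up to the producer. A condensed sketch of that flow, reusing only helpers visible in this excerpt; the function name is hypothetical and the SYNC dword encoding is elided and marked as an assumption.

static int ivpu_mmu_cmdq_sync_sketch(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
	u64 val = 0; /* assumed: SYNC opcode plus completion-signal fields */
	int ret;

	/* 1. Write a SYNC command into the current producer slot. */
	ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
	if (ret)
		return ret;

	/* 2. Flush the queue memory so the SMMU observes the new command. */
	clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);

	/* 3. Ring the doorbell by publishing the updated producer index. */
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);

	/* 4. Wait for the hardware consumer to reach the producer index. */
	return ivpu_mmu_cmdq_wait_for_cons(vdev);
}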
566 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_reset()
570 memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
572 clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
573 mmu->cmdq.prod = 0; in ivpu_mmu_reset()
574 mmu->cmdq.cons = 0; in ivpu_mmu_reset()
576 memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); in ivpu_mmu_reset()
577 mmu->evtq.prod = 0; in ivpu_mmu_reset()
578 mmu->evtq.cons = 0; in ivpu_mmu_reset()
592 REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q); in ivpu_mmu_reset()
593 REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg); in ivpu_mmu_reset()
595 REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q); in ivpu_mmu_reset()
616 REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q); in ivpu_mmu_reset()
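Read in order, the reset lines above give the bring-up sequence: clear and rewind both in-memory queues, then reprogram the stream table and queue base registers before the SMMU is re-enabled. A condensed sketch assembled from the lines of this excerpt; the function name is hypothetical, and the enable/ack handshake that follows in the real function is only noted in a comment.

static void ivpu_mmu_reset_sketch(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;

	/* Clear the command queue, flush it, and rewind the ring pointers. */
	memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
	clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
	mmu->cmdq.prod = 0;
	mmu->cmdq.cons = 0;

	/* Clear the event queue and rewind its ring pointers. */
	memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
	mmu->evtq.prod = 0;
	mmu->evtq.cons = 0;

	/* Reprogram table and queue base registers from the cached encodings. */
	REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
	REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
	REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
	REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);

	/* The real function then re-enables the SMMU (enable/ack handshake not shown here). */
}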
640 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_link_cd()
641 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_link_cd()
642 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_strtab_link_cd()
643 u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE); in ivpu_mmu_strtab_link_cd()
650 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK); in ivpu_mmu_strtab_link_cd()
682 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_invalidate_tlb()
685 mutex_lock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
686 if (!mmu->on) in ivpu_mmu_invalidate_tlb()
695 mutex_unlock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
701 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cdtab_entry_set()
702 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cdtab_entry_set()
708 return -EINVAL; in ivpu_mmu_cdtab_entry_set()
710 entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE); in ivpu_mmu_cdtab_entry_set()
711 drm_WARN_ON(&vdev->drm, (entry[0] & IVPU_MMU_CD_0_V) == valid); in ivpu_mmu_cdtab_entry_set()
746 mutex_lock(&mmu->lock); in ivpu_mmu_cdtab_entry_set()
747 if (!mmu->on) in ivpu_mmu_cdtab_entry_set()
758 mutex_unlock(&mmu->lock); in ivpu_mmu_cdtab_entry_set()
763 mutex_unlock(&mmu->lock); in ivpu_mmu_cdtab_entry_set()
769 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_init()
776 ret = drmm_mutex_init(&vdev->drm, &mmu->lock); in ivpu_mmu_init()
803 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_enable()
806 mutex_lock(&mmu->lock); in ivpu_mmu_enable()
808 mmu->on = true; in ivpu_mmu_enable()
828 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
832 mmu->on = false; in ivpu_mmu_enable()
833 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
839 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_disable()
841 mutex_lock(&mmu->lock); in ivpu_mmu_disable()
842 mmu->on = false; in ivpu_mmu_disable()
843 mutex_unlock(&mmu->lock); in ivpu_mmu_disable()
861 struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq; in ivpu_mmu_get_event()
862 u32 idx = IVPU_MMU_Q_IDX(evtq->cons); in ivpu_mmu_get_event()
863 u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); in ivpu_mmu_get_event()
865 evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC); in ivpu_mmu_get_event()
869 evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_get_event()
890 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons); in ivpu_mmu_irq_evtq_handler()
893 if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ)) in ivpu_mmu_irq_evtq_handler()
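The evtq lines above show the consumer side of the event ring: latch the hardware producer index, read the entry at the current consumer slot, advance the consumer with the wrap mask, write the consumer index back to the hardware, and queue the event source for the bottom half. A condensed sketch of that loop built from the helpers visible here; the function name and the drain-until-empty loop are assumptions, and per-event decoding is elided.

static void ivpu_mmu_evtq_drain_sketch(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
	u32 *evt;

	/* Latch the secure producer index written by the hardware. */
	evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);

	while (!ivpu_mmu_queue_is_empty(evtq)) {
		/* Entry at the current consumer slot. */
		evt = evtq->base + (IVPU_MMU_Q_IDX(evtq->cons) * IVPU_MMU_EVTQ_CMD_SIZE);

		(void)evt; /* event decoding/logging elided in this sketch */

		/* Advance the consumer, keeping index plus wrap bit in range. */
		evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
	}

	/* Acknowledge consumption back to the hardware. */
	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);

	/* Hand further processing to the bottom half via the IRQ fifo. */
	if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
		ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); /* assumed error path */
}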
928 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n"); in ivpu_mmu_irq_gerr_handler()
936 if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active)) in ivpu_mmu_irq_gerr_handler()
937 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n"); in ivpu_mmu_irq_gerr_handler()
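The GERROR lines follow the standard SMMUv3 global-error pattern: the active errors are the bits that differ between GERROR and the last acknowledged value, each active bit is reported, and the handler acknowledges them by writing the observed GERROR value back. A minimal sketch of that pattern; only the error mask and the CMDQ bit test appear in this excerpt, so the acknowledge register name and the surrounding flow are assumptions.

static void ivpu_mmu_irq_gerr_sketch(struct ivpu_device *vdev)
{
	u32 gerror_val, gerrorn_val, active;

	gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
	gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN); /* assumed acknowledge register */

	/* Bits set in GERROR but not yet acknowledged are the active errors. */
	active = gerror_val ^ gerrorn_val;
	if (!(active & IVPU_MMU_GERROR_ERR_MASK))
		return;

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
		ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");

	/* Acknowledge everything that has been reported. */
	REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}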
944 return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true); in ivpu_mmu_cd_set()