Lines matching full:mmu
255 return "MMU bypass is disallowed for this StreamID"; in ivpu_mmu_event_to_str()
313 ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref); in ivpu_mmu_config_check()
317 ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF); in ivpu_mmu_config_check()
321 ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF); in ivpu_mmu_config_check()
332 ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref); in ivpu_mmu_config_check()
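
The ivpu_mmu_config_check() matches above compare the MMU's ID registers against reference values the driver expects and merely log any mismatch. A minimal sketch of that pattern, in driver context (the REGV_RD32 read accessor is assumed by symmetry with the REGV_WR32/WR64 writes seen further down; IVPU_MMU_IDR1_REF and friends are driver constants whose values are not shown here):

    u32 val;

    /* Read each ID register and report, but do not fail, on mismatch. */
    val = REGV_RD32(IVPU_MMU_REG_IDR1);
    if (val != IVPU_MMU_IDR1_REF)
        ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);

    val = REGV_RD32(IVPU_MMU_REG_IDR3);
    if (val != IVPU_MMU_IDR3_REF)
        ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);

IDR0 and IDR5 follow the same shape but compare against a computed val_ref rather than a constant, as the matches at lines 313 and 332 suggest.
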
337 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cdtab_alloc() local
338 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cdtab_alloc()
345 ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size); in ivpu_mmu_cdtab_alloc()
352 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_alloc() local
353 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_alloc()
364 ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n", in ivpu_mmu_strtab_alloc()
372 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cmdq_alloc() local
373 struct ivpu_mmu_queue *q = &mmu->cmdq; in ivpu_mmu_cmdq_alloc()
383 ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n", in ivpu_mmu_cmdq_alloc()
391 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_evtq_alloc() local
392 struct ivpu_mmu_queue *q = &mmu->evtq; in ivpu_mmu_evtq_alloc()
402 ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n", in ivpu_mmu_evtq_alloc()
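
All four allocators above (ivpu_mmu_cdtab_alloc, ivpu_mmu_strtab_alloc, ivpu_mmu_cmdq_alloc, ivpu_mmu_evtq_alloc) follow one shape: grab a DMA-coherent buffer, record its CPU and DMA addresses in vdev->mmu, and log the result. A hedged sketch of the command-queue variant, assuming the size constant and the dma/dma_q field split implied by the log strings:

    static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
    {
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_queue *q = &mmu->cmdq;

        /* Managed allocation: freed automatically with the device. */
        q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE,
                                      &q->dma, GFP_KERNEL);
        if (!q->base)
            return -ENOMEM;

        /* dma_q is the value programmed into CMDQ_BASE: the DMA address
         * with SMMU-style attribute/log2size bits folded in (assumption). */
        q->dma_q = IVPU_MMU_Q_BASE_RWA | q->dma | IVPU_MMU_CMDQ_LOG2SIZE;

        ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
                 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);
        return 0;
    }
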
465 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_wait_for_cons()
492 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_cmd_write()
497 ivpu_err(vdev, "Failed to write MMU CMD %s\n", name); in ivpu_mmu_cmdq_cmd_write()
505 ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1); in ivpu_mmu_cmdq_cmd_write()
512 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_sync()
533 ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret, in ivpu_mmu_cmdq_sync()
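
The matches above are the command-queue write path: ivpu_mmu_cmdq_cmd_write() places a two-dword command at the producer index, and ivpu_mmu_cmdq_sync() pushes a CMD_SYNC, rings the PROD doorbell, and waits on the consumer register via ivpu_mmu_cmdq_wait_for_cons(). A sketch of the writer, assuming SMMU-style index macros (IVPU_MMU_Q_IDX keeps the index bits; prod/cons carry an extra wrap bit) and CIRC_SPACE() from <linux/circ_buf.h>:

    static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name,
                                       u64 data0, u64 data1)
    {
        struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
        u64 *queue_buffer = cmdq->base;
        int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

        /* Refuse to overwrite commands the device has not consumed yet. */
        if (!CIRC_SPACE(IVPU_MMU_Q_IDX(cmdq->prod), IVPU_MMU_Q_IDX(cmdq->cons),
                        IVPU_MMU_Q_COUNT)) {
            ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
            return -EBUSY;
        }

        queue_buffer[idx] = data0;
        queue_buffer[idx + 1] = data1;
        cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;

        ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
        return 0;
    }

On timeout, the sync path decodes the error field from CMDQ_CONS and prints the "Timed out waiting for MMU consumer" message seen at line 533.
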
566 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_reset() local
570 memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
572 clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
573 mmu->cmdq.prod = 0; in ivpu_mmu_reset()
574 mmu->cmdq.cons = 0; in ivpu_mmu_reset()
576 memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); in ivpu_mmu_reset()
577 mmu->evtq.prod = 0; in ivpu_mmu_reset()
578 mmu->evtq.cons = 0; in ivpu_mmu_reset()
592 REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q); in ivpu_mmu_reset()
593 REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg); in ivpu_mmu_reset()
595 REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q); in ivpu_mmu_reset()
616 REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q); in ivpu_mmu_reset()
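
Taken together, the ivpu_mmu_reset() matches give the bring-up order: zero the command queue (flushing CPU caches with clflush_cache_range() when the device is not snooping), reset the software prod/cons copies, do the same for the event queue, then program the stream-table and queue base registers. In outline, with the register writes grounded in the matches and the PROD/CONS/CR0 steps assumed from the SMMU programming model:

    memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
    clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
    mmu->cmdq.prod = 0;
    mmu->cmdq.cons = 0;

    memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
    mmu->evtq.prod = 0;
    mmu->evtq.cons = 0;

    REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
    REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
    REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
    /* ...zero CMDQ_PROD/CONS, enable the command queue via CR0, and
     * issue invalidate + sync commands before the EVTQ setup below
     * (assumed intermediate steps)... */
    REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
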
640 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_link_cd() local
641 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_link_cd()
642 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_strtab_link_cd()
669 ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]); in ivpu_mmu_strtab_link_cd()
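
ivpu_mmu_strtab_link_cd() fills the stream-table entry for a stream ID so that stage-1 translation goes through the context-descriptor table allocated earlier. A hedged sketch of the two-dword STE write (the field macros follow SMMUv3 naming and are assumptions; only the final debug print is taken from the match):

    u64 *entry = mmu->strtab.base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
    u64 str[2];

    /* dword 0: valid bit, stage-1 config, pointer to the CD table */
    str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
             IVPU_MMU_STE_0_V |
             (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);
    /* dword 1: stage-1 attributes (S1DSS behaviour, memory attributes, ...) */
    str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE);

    /* Publish dword 1 first so the entry never looks valid half-written. */
    WRITE_ONCE(entry[1], str[1]);
    WRITE_ONCE(entry[0], str[0]);

    ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n",
             sid, str[0], str[1]);
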
682 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_invalidate_tlb() local
685 mutex_lock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
686 if (!mmu->on) in ivpu_mmu_invalidate_tlb()
695 mutex_unlock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
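
ivpu_mmu_invalidate_tlb() is a small lock/check/queue pattern: take mmu->lock, return early if the MMU is off (a disabled MMU holds no live translations), otherwise queue a TLB invalidate for the address space and sync. Roughly, with the TLBI helper name an assumption:

    static int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
    {
        struct ivpu_mmu_info *mmu = vdev->mmu;
        int ret = 0;

        mutex_lock(&mmu->lock);
        if (!mmu->on)
            goto unlock;

        ret = ivpu_mmu_cmdq_write_tlbi_asid(vdev, ssid);
        if (ret)
            goto unlock;

        ret = ivpu_mmu_cmdq_sync(vdev);
    unlock:
        mutex_unlock(&mmu->lock);
        return ret;
    }
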
701 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cdtab_entry_set() local
702 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cdtab_entry_set()
743 ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", in ivpu_mmu_cdtab_entry_set()
746 mutex_lock(&mmu->lock); in ivpu_mmu_cdtab_entry_set()
747 if (!mmu->on) in ivpu_mmu_cdtab_entry_set()
758 mutex_unlock(&mmu->lock); in ivpu_mmu_cdtab_entry_set()
763 mutex_unlock(&mmu->lock); in ivpu_mmu_cdtab_entry_set()
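
ivpu_mmu_cdtab_entry_set() writes a four-dword context descriptor for an SSID (the "%s entry" in the debug print distinguishes valid from invalid descriptors) and, only when the MMU is already on, follows up with a configuration-invalidate command plus sync so the hardware re-fetches it; that explains the two unlock sites at lines 758 and 763. The tail of the function, sketched with an assumed CFGI helper name:

    mutex_lock(&mmu->lock);
    if (!mmu->on)
        goto unlock;    /* descriptor is picked up at the next MMU reset */

    ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
    if (ret)
        goto err_unlock;

    ret = ivpu_mmu_cmdq_sync(vdev);
    if (ret)
        goto err_unlock;
    unlock:
    mutex_unlock(&mmu->lock);
    return 0;
    err_unlock:
    mutex_unlock(&mmu->lock);
    return ret;
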
769 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_init() local
772 ivpu_dbg(vdev, MMU, "Init..\n"); in ivpu_mmu_init()
776 ret = drmm_mutex_init(&vdev->drm, &mmu->lock); in ivpu_mmu_init()
792 ivpu_err(vdev, "Failed to resume MMU: %d\n", ret); in ivpu_mmu_init()
796 ivpu_dbg(vdev, MMU, "Init done\n"); in ivpu_mmu_init()
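
ivpu_mmu_init() ties the pieces together in order: a drm-managed mutex, the ID-register check, table and queue allocation, then a first enable; if that resume fails, the whole init fails. Approximately (ivpu_mmu_structs_alloc as a name for the combined allocator is an assumption):

    static int ivpu_mmu_init(struct ivpu_device *vdev)
    {
        struct ivpu_mmu_info *mmu = vdev->mmu;
        int ret;

        ivpu_dbg(vdev, MMU, "Init..\n");

        ret = drmm_mutex_init(&vdev->drm, &mmu->lock);  /* destroyed with the drm device */
        if (ret)
            return ret;

        ivpu_mmu_config_check(vdev);

        ret = ivpu_mmu_structs_alloc(vdev);
        if (ret)
            return ret;

        ret = ivpu_mmu_enable(vdev);
        if (ret) {
            ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
            return ret;
        }

        ivpu_dbg(vdev, MMU, "Init done\n");
        return 0;
    }
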
803 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_enable() local
806 mutex_lock(&mmu->lock); in ivpu_mmu_enable()
808 mmu->on = true; in ivpu_mmu_enable()
812 ivpu_err(vdev, "Failed to reset MMU: %d\n", ret); in ivpu_mmu_enable()
828 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
832 mmu->on = false; in ivpu_mmu_enable()
833 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
839 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_disable() local
841 mutex_lock(&mmu->lock); in ivpu_mmu_disable()
842 mmu->on = false; in ivpu_mmu_disable()
843 mutex_unlock(&mmu->lock); in ivpu_mmu_disable()
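
The enable/disable pair shows the locking discipline around the mmu->on flag: enable sets the flag first (so commands queued during bring-up are accepted), resets and reprograms the hardware, and rolls the flag back on any failure; disable merely clears the flag under the lock, leaving hardware teardown to the power-management path. Sketch of the enable path:

    mutex_lock(&mmu->lock);
    mmu->on = true;

    ret = ivpu_mmu_reset(vdev);
    if (ret) {
        ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
        goto err;
    }

    /* ...re-issue CFGI/TLBI plus a sync here so no stale translations
     * survive the power cycle (assumed step)... */

    mutex_unlock(&mmu->lock);
    return 0;
    err:
    mmu->on = false;
    mutex_unlock(&mmu->lock);
    return ret;
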
854 …ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0… in ivpu_mmu_dump_event()
861 struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq; in ivpu_mmu_get_event()
878 ivpu_dbg(vdev, IRQ, "MMU event queue\n"); in ivpu_mmu_irq_evtq_handler()
885 ivpu_pm_trigger_recovery(vdev, "MMU event"); in ivpu_mmu_irq_evtq_handler()
890 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons); in ivpu_mmu_irq_evtq_handler()
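
The event path polls the queue rather than trusting a cached producer: ivpu_mmu_get_event() re-reads EVTQ_PROD_SEC, returns NULL when the queue is empty, and otherwise advances the consumer; the IRQ handler loops over it, dumps each event, triggers recovery ("MMU event", line 885) on faults, and finally writes the consumer index back to hardware (line 890). A sketch of the pop, assuming the same index/wrap macros as the command queue:

    static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
    {
        struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
        u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
        u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);

        /* Refresh the producer from hardware before testing for data. */
        evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
        if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons),
                      IVPU_MMU_Q_COUNT))
            return NULL;

        evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
        return evt;
    }
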
909 ivpu_dbg(vdev, IRQ, "MMU error\n"); in ivpu_mmu_irq_gerr_handler()
919 ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n"); in ivpu_mmu_irq_gerr_handler()
922 ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n"); in ivpu_mmu_irq_gerr_handler()
925 ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n"); in ivpu_mmu_irq_gerr_handler()
928 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n"); in ivpu_mmu_irq_gerr_handler()
931 ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n"); in ivpu_mmu_irq_gerr_handler()
934 ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n"); in ivpu_mmu_irq_gerr_handler()
937 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n"); in ivpu_mmu_irq_gerr_handler()
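
The global-error handler closes the section: it reads GERROR, compares it against the last-acknowledged GERRORN value to find newly raised bits, logs each one (warn-level for MSI write aborts, err-level for direct queue write aborts, as the messages above show), and acknowledges by writing GERROR back to GERRORN. Roughly, with REG_TEST_FLD as an assumed driver field-test macro:

    u32 gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
    u32 gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);
    u32 active = gerror_val ^ gerrorn_val;  /* bits not yet acknowledged */

    if (!active)
        return;

    ivpu_dbg(vdev, IRQ, "MMU error\n");

    if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
        ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
    /* ...same pattern for the EVTQ/PRIQ and MSI abort bits... */

    REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);    /* acknowledge */
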