Lines matching full:vdev (whole-word occurrences of the identifier vdev):
47 static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma) in ivpu_pgtable_alloc_page() argument
59 dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); in ivpu_pgtable_alloc_page()
60 if (dma_mapping_error(vdev->drm.dev, dma_addr)) in ivpu_pgtable_alloc_page()
72 dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); in ivpu_pgtable_alloc_page()
79 static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr) in ivpu_pgtable_free_page() argument
86 dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE, in ivpu_pgtable_free_page()
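The two matches above come from the helpers that back each page-table level with a single DMA-mapped page. Below is a minimal, generic sketch of that allocate-map / unmap-free pairing using only the standard DMA-mapping API; it is not the driver's exact code (the real free path also strips entry flag bits via IVPU_MMU_ENTRY_FLAGS_MASK, as the match shows, and any cache maintenance is omitted here).

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: allocate one zeroed page and map it for device access. */
static void *pgtable_page_alloc(struct device *dev, dma_addr_t *dma_out)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return NULL;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		__free_page(page);
		return NULL;
	}

	*dma_out = dma;
	return page_address(page);	/* CPU view of the page-table page */
}

/* Sketch: undo the mapping and release the page. */
static void pgtable_page_free(struct device *dev, void *cpu_addr, dma_addr_t dma)
{
	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(virt_to_page(cpu_addr));
}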
93 static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) in ivpu_mmu_pgtables_free() argument
117 ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma); in ivpu_mmu_pgtables_free()
121 ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma); in ivpu_mmu_pgtables_free()
126 ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma); in ivpu_mmu_pgtables_free()
129 ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma); in ivpu_mmu_pgtables_free()
135 ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) in ivpu_mmu_ensure_pgd() argument
143 pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma); in ivpu_mmu_ensure_pgd()
154 ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx) in ivpu_mmu_ensure_pud() argument
162 pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma); in ivpu_mmu_ensure_pud()
166 drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]); in ivpu_mmu_ensure_pud()
171 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]); in ivpu_mmu_ensure_pud()
185 ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma); in ivpu_mmu_ensure_pud()
190 ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx, in ivpu_mmu_ensure_pmd() argument
199 pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma); in ivpu_mmu_ensure_pmd()
203 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]); in ivpu_mmu_ensure_pmd()
214 ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma); in ivpu_mmu_ensure_pmd()
219 ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, in ivpu_mmu_ensure_pte() argument
228 pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma); in ivpu_mmu_ensure_pte()
239 ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, in ivpu_mmu_context_map_page() argument
248 drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID); in ivpu_mmu_context_map_page()
251 if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable)) in ivpu_mmu_context_map_page()
255 if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx)) in ivpu_mmu_context_map_page()
259 if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx)) in ivpu_mmu_context_map_page()
263 pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx); in ivpu_mmu_context_map_page()
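The ensure_pgd/pud/pmd/pte and map_page matches outline a lazily populated four-level page-table walk: each level is allocated on first touch, and the leaf PTE is written only once the whole walk succeeds. A sketch of that shape, reusing the function and struct names from the listing; the index macros (9-bit indices, 4 KB pages) and the -ENOMEM returns are assumptions, not the driver's definitions.

/* Hypothetical index helpers: 4 KB pages, 9 bits per level (values assumed). */
#define PGD_INDEX(a)	(((a) >> 39) & 0x1ff)
#define PUD_INDEX(a)	(((a) >> 30) & 0x1ff)
#define PMD_INDEX(a)	(((a) >> 21) & 0x1ff)
#define PTE_INDEX(a)	(((a) >> 12) & 0x1ff)

static int map_page_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	int pgd_idx = PGD_INDEX(vpu_addr);
	int pud_idx = PUD_INDEX(vpu_addr);
	int pmd_idx = PMD_INDEX(vpu_addr);
	u64 *pte;

	/* The reserved context is not expected on this path. */
	drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);

	/* Populate each level on demand; any allocation failure is -ENOMEM. */
	if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
		return -ENOMEM;
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Leaf entry: DMA address of the page plus protection/valid bits. */
	pte[PTE_INDEX(vpu_addr)] = dma_addr | prot;
	return 0;
}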
274 ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, in ivpu_mmu_context_map_cont_64k() argument
279 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size)); in ivpu_mmu_context_map_cont_64k()
280 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size)); in ivpu_mmu_context_map_cont_64k()
285 int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_cont_64k()
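The cont_64k matches check 64 KB alignment of both addresses and then call the per-page mapper, which suggests a 64 KB block is written as sixteen consecutive 4 KB PTEs carrying a contiguous hint. A sketch under that assumption; IVPU_MMU_ENTRY_FLAG_CONT is an assumed name for the hint bit, and SZ_64K stands in for the driver's contiguous-page size constant.

static int map_cont_64k_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			       u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	size_t size = SZ_64K;

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

	prot |= IVPU_MMU_ENTRY_FLAG_CONT;	/* assumed name of the contiguous-hint bit */

	/* One 64 KB block becomes sixteen 4 KB leaf entries sharing the hint. */
	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}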
310 ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, in ivpu_mmu_context_map_pages() argument
319 ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_pages()
322 ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_pages()
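The map_pages matches then show the size selector: use the 64 KB contiguous path when the remaining range is large enough and both addresses are 64 KB aligned, otherwise map a single 4 KB page, and advance. A sketch of that loop with the names from the listing; the exact alignment test is an assumption.

static int map_pages_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			    u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
	int ret;

	while (size) {
		size_t map_size;

		/* Prefer the 64 KB contiguous path when size and alignment allow it. */
		if (size >= SZ_64K &&
		    IS_ALIGNED(vpu_addr, SZ_64K) && IS_ALIGNED(dma_addr, SZ_64K)) {
			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = SZ_64K;
		} else {
			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_PAGE_SIZE;
		}

		if (ret)
			return ret;

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}

	return 0;
}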
337 static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, in ivpu_mmu_context_set_page_ro() argument
348 static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, in ivpu_mmu_context_split_page() argument
359 static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, in ivpu_mmu_context_split_64k_page() argument
366 ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr); in ivpu_mmu_context_split_64k_page()
369 ivpu_mmu_context_split_page(vdev, ctx, start + offset); in ivpu_mmu_context_split_64k_page()
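Splitting a 64 KB mapping, per the matches above, means revisiting each of its sixteen 4 KB entries and rewriting them individually via ivpu_mmu_context_split_page(). A sketch of that loop; aligning down to the 64 KB boundary is an inference from the start + offset arithmetic in the listing.

/* Sketch: walk the sixteen 4 KB pages making up one 64 KB contiguous mapping. */
static void split_64k_page_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				  u64 vpu_addr)
{
	u64 start = ALIGN_DOWN(vpu_addr, SZ_64K);
	u64 offset;

	ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

	for (offset = 0; offset < SZ_64K; offset += IVPU_MMU_PAGE_SIZE)
		ivpu_mmu_context_split_page(vdev, ctx, start + offset);
}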
375 ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, in ivpu_mmu_context_set_pages_ro() argument
385 if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE))) in ivpu_mmu_context_set_pages_ro()
390 ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n", in ivpu_mmu_context_set_pages_ro()
396 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr); in ivpu_mmu_context_set_pages_ro()
400 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size); in ivpu_mmu_context_set_pages_ro()
405 ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr); in ivpu_mmu_context_set_pages_ro()
415 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); in ivpu_mmu_context_set_pages_ro()
417 ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); in ivpu_mmu_context_set_pages_ro()
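The set_pages_ro matches suggest a three-step flow: split any 64 KB contiguous mappings cut by the range boundaries, flip each 4 KB page in the range to read-only, then invalidate the context's TLB so the VPU observes the new permissions. A condensed sketch of that order, reusing the names from the listing; the lock placement and error codes are assumptions.

static int set_pages_ro_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			       u64 vpu_addr, size_t size)
{
	u64 end = vpu_addr + size;
	u64 addr;
	int ret;

	if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
		return -EINVAL;

	mutex_lock(&ctx->lock);

	/* A range boundary may cut through a 64 KB contiguous mapping; split those first. */
	if (!IS_ALIGNED(vpu_addr, SZ_64K))
		ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);
	if (!IS_ALIGNED(end, SZ_64K))
		ivpu_mmu_context_split_64k_page(vdev, ctx, end);

	/* Flip every 4 KB page in the range to read-only. */
	for (addr = vpu_addr; addr < end; addr += IVPU_MMU_PAGE_SIZE)
		ivpu_mmu_context_set_page_ro(vdev, ctx, addr);

	mutex_unlock(&ctx->lock);

	/* The VPU only sees the new permissions after its TLB entries are dropped. */
	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

	return ret;
}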
432 ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, in ivpu_mmu_context_map_sgt() argument
441 if (drm_WARN_ON(&vdev->drm, !ctx)) in ivpu_mmu_context_map_sgt()
460 ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n", in ivpu_mmu_context_map_sgt()
463 ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot); in ivpu_mmu_context_map_sgt()
465 ivpu_err(vdev, "Failed to map context pages\n"); in ivpu_mmu_context_map_sgt()
472 ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable); in ivpu_mmu_context_map_sgt()
474 ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n", in ivpu_mmu_context_map_sgt()
484 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); in ivpu_mmu_context_map_sgt()
486 ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); in ivpu_mmu_context_map_sgt()
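The map_sgt matches show the per-buffer path: walk the DMA-mapped scatterlist, map each contiguous segment at increasing VPU addresses, install the context descriptor if this is the context's first mapping, and finish with a TLB invalidation. A sketch of that walk using the standard for_each_sgtable_dma_sg() iterator; ctx->is_cd_valid is an assumed field name, and the real function's error unwinding and descriptor handling may differ.

#include <linux/scatterlist.h>

static int map_sgt_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, struct sg_table *sgt, u64 prot)
{
	struct scatterlist *sg;
	int ret, i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return -EINVAL;

	mutex_lock(&ctx->lock);

	/* Map each DMA-contiguous segment of the buffer at increasing VPU addresses. */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		size_t size = sg_dma_len(sg);

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			goto err_unlock;
		}
		vpu_addr += size;
	}

	/* First mapping in this context: point the MMU context descriptor at the PGD. */
	if (!ctx->is_cd_valid) {	/* field name assumed */
		ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
		if (ret) {
			ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
				 ctx->id, ret);
			goto err_unlock;
		}
		ctx->is_cd_valid = true;
	}

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

	return ret;

err_unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}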
500 ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, in ivpu_mmu_context_unmap_sgt() argument
507 if (drm_WARN_ON(&vdev->drm, !ctx)) in ivpu_mmu_context_unmap_sgt()
516 ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n", in ivpu_mmu_context_unmap_sgt()
528 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); in ivpu_mmu_context_unmap_sgt()
530 ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); in ivpu_mmu_context_unmap_sgt()
564 void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id) in ivpu_mmu_context_init() argument
571 start = vdev->hw->ranges.global.start; in ivpu_mmu_context_init()
572 end = vdev->hw->ranges.shave.end; in ivpu_mmu_context_init()
574 start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start); in ivpu_mmu_context_init()
575 end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end); in ivpu_mmu_context_init()
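The context_init matches show how each context's managed VPU address range is chosen: the global context spans from the global range through the end of the shave range, while ordinary contexts take the union of the user, shave and dma ranges. A sketch of that selection, presumably keyed on the global SSID; the drm_mm-based address-space setup and the ctx->mm field are assumptions.

#include <drm/drm_mm.h>

static void context_init_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				u32 context_id)
{
	u64 start, end;

	if (context_id == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
		/* Global context: one range covering global through shave. */
		start = vdev->hw->ranges.global.start;
		end = vdev->hw->ranges.shave.end;
	} else {
		/* User contexts: union of the user, shave and dma ranges. */
		start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
		end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
	}

	mutex_init(&ctx->lock);
	ctx->id = context_id;

	/* Assumed: a drm_mm allocator hands out VPU addresses within [start, end). */
	drm_mm_init(&ctx->mm, start, end - start);
}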
582 void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) in ivpu_mmu_context_fini() argument
585 ivpu_mmu_cd_clear(vdev, ctx->id); in ivpu_mmu_context_fini()
590 ivpu_mmu_pgtables_free(vdev, &ctx->pgtable); in ivpu_mmu_context_fini()
594 void ivpu_mmu_global_context_init(struct ivpu_device *vdev) in ivpu_mmu_global_context_init() argument
596 ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID); in ivpu_mmu_global_context_init()
599 void ivpu_mmu_global_context_fini(struct ivpu_device *vdev) in ivpu_mmu_global_context_fini() argument
601 ivpu_mmu_context_fini(vdev, &vdev->gctx); in ivpu_mmu_global_context_fini()
604 int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev) in ivpu_mmu_reserved_context_init() argument
608 ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID); in ivpu_mmu_reserved_context_init()
610 mutex_lock(&vdev->rctx.lock); in ivpu_mmu_reserved_context_init()
612 if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) { in ivpu_mmu_reserved_context_init()
613 ivpu_err(vdev, "Failed to allocate root page table for reserved context\n"); in ivpu_mmu_reserved_context_init()
618 ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable); in ivpu_mmu_reserved_context_init()
620 ivpu_err(vdev, "Failed to set context descriptor for reserved context\n"); in ivpu_mmu_reserved_context_init()
624 mutex_unlock(&vdev->rctx.lock); in ivpu_mmu_reserved_context_init()
628 mutex_unlock(&vdev->rctx.lock); in ivpu_mmu_reserved_context_init()
629 ivpu_mmu_context_fini(vdev, &vdev->rctx); in ivpu_mmu_reserved_context_init()
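The reserved-context matches lay out the init sequence and its unwind: initialize the context, take its lock, pre-allocate the root page table, install the context descriptor, and on any failure drop the lock and tear the context back down. A sketch of that control flow with the names from the listing; the error codes are assumptions.

static int reserved_context_init_sketch(struct ivpu_device *vdev)
{
	int ret = 0;

	ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);

	mutex_lock(&vdev->rctx.lock);

	/* The reserved context needs its root page table up front, before any mapping. */
	if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
		ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
		goto err_unlock;
	}

	mutex_unlock(&vdev->rctx.lock);
	return 0;

err_unlock:
	mutex_unlock(&vdev->rctx.lock);
	ivpu_mmu_context_fini(vdev, &vdev->rctx);
	return ret;
}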
633 void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev) in ivpu_mmu_reserved_context_fini() argument
635 ivpu_mmu_cd_clear(vdev, vdev->rctx.id); in ivpu_mmu_reserved_context_fini()
636 ivpu_mmu_context_fini(vdev, &vdev->rctx); in ivpu_mmu_reserved_context_fini()
639 void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid) in ivpu_mmu_user_context_mark_invalid() argument
643 xa_lock(&vdev->context_xa); in ivpu_mmu_user_context_mark_invalid()
645 file_priv = xa_load(&vdev->context_xa, ssid); in ivpu_mmu_user_context_mark_invalid()
649 xa_unlock(&vdev->context_xa); in ivpu_mmu_user_context_mark_invalid()
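The final matches show the MMU fault path flagging the offending user context: look up its file_priv by SSID under the xarray lock and mark it. A sketch of that lookup; ivpu_file_priv is the driver's per-client structure, and the has_mmu_faults flag name is an assumption.

static void mark_user_context_invalid_sketch(struct ivpu_device *vdev, u32 ssid)
{
	struct ivpu_file_priv *file_priv;

	xa_lock(&vdev->context_xa);

	file_priv = xa_load(&vdev->context_xa, ssid);
	if (file_priv)
		file_priv->has_mmu_faults = true;	/* flag name assumed */

	xa_unlock(&vdev->context_xa);
}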