/* Excerpts from the Panfrost GPU MMU driver (drivers/gpu/drm/panfrost/panfrost_mmu.c). */

// SPDX-License-Identifier: GPL-2.0
/* ... */
#include <linux/dma-mapping.h>
#include <linux/io-pgtable.h>
/* ... */

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending.
	 */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);

	if (ret) {
		/* The GPU hung, let's trigger a reset */
		/* ... */
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
	}

	return ret;
}
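/*
 * Illustrative sketch, not driver code: the readl_relaxed_poll_timeout_atomic()
 * call above is roughly equivalent to this open-coded busy-wait. The name
 * wait_ready_open_coded() is hypothetical; the 10 us step and 100 ms budget
 * mirror the arguments used above.
 */
static int wait_ready_open_coded(struct panfrost_device *pfdev, u32 as_nr)
{
	unsigned int retries = 100000 / 10;	/* 100 ms total, 10 us steps */

	for (;;) {
		u32 val = readl_relaxed(pfdev->iomem + AS_STATUS(as_nr));

		if (!(val & AS_STATUS_AS_ACTIVE))
			return 0;		/* no command in flight */
		if (!retries--)
			return -ETIMEDOUT;	/* AS_ACTIVE bit is stuck */
		udelay(10);
	}
}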
/* write AS_COMMAND when MMU is ready to accept another command */
static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	/* ... */
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 region_start, u64 size)
{
	u8 region_width;
	u64 region_end = region_start + size;

	/* ... */
	region_width = max(fls64(region_start ^ (region_end - 1)),
			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
	/* ... */
}
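/*
 * Worked example (illustrative, not from the driver): locking the range
 * [0x10000, 0x12000). region_start ^ (region_end - 1) = 0x10000 ^ 0x11fff
 * = 0x1fff, and fls64(0x1fff) = 13: the smallest naturally aligned
 * power-of-two block covering both ends is 2^13 bytes. Assuming
 * AS_LOCK_REGION_MIN_SIZE is 1 << 15 (32 KiB), max(13, 15) - 1 = 14 is the
 * value programmed into the lock-address size field, which the hardware
 * interprets as log2(region size) - 1, i.e. a 32 KiB lock region.
 */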
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{
	/* ... */
	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, u64 size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	/* ... */
}
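/*
 * Illustrative sketch, not the driver's exact sequence: enabling an AS
 * programs the page-table base (transtab) and memory attributes (memattr)
 * produced by the ARM_MALI_LPAE io-pgtable backend into the per-AS
 * registers, then issues an UPDATE command. Register macro names are
 * assumed to match panfrost_regs.h.
 */
static void mmu_program_as_sketch(struct panfrost_device *pfdev, int as_nr,
				  u64 transtab, u64 memattr)
{
	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}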
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		u32 mask = BIT(as);

		/* ... */
		list_move(&mmu->list, &pfdev->as_lru_list);

		if (pfdev->as_faulty_mask & mask) {
			/* Unhandled pagefault on this AS, the MMU was
			 * disabled. We need to re-enable the MMU after
			 * clearing+unmasking the AS interrupts.
			 */
			mmu_write(pfdev, MMU_INT_CLEAR, mask);
			mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
			pfdev->as_faulty_mask &= ~mask;
			panfrost_mmu_enable(pfdev, mmu);
		}

		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;
		/* ... */
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx",
		as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}
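/*
 * Illustrative usage, not driver code: a caller pins an address space for
 * the duration of a piece of GPU work with the get/put pair. In the real
 * driver the put happens on job completion; it is shown synchronously here
 * for brevity, and submit_job_sketch() is a hypothetical name.
 */
static void submit_job_sketch(struct panfrost_device *pfdev,
			      struct panfrost_mmu *mmu)
{
	u32 as = panfrost_mmu_as_get(pfdev, mmu);	/* pins the AS */

	/* ... write 'as' into the job descriptor and kick the hardware ... */

	panfrost_mmu_as_put(pfdev, mmu);	/* drop the pin */
}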
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;
	pfdev->as_faulty_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	/* ... */
}
static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	/*
	 * io-pgtable only operates on multiple pages within a single table
	 * entry, so we need to split at boundaries of the table size, i.e.
	 * the next block size up. The distance from address A to the next
	 * boundary of block size B is logically B - A % B, but in unsigned
	 * two's complement where B is a power of two we get the equivalence
	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
	 */
	size_t blk_offset = -addr % SZ_2M;

	if (blk_offset || size < SZ_2M) {
		*count = min_not_zero(blk_offset, size) / SZ_4K;
		return SZ_4K;
	}

	blk_offset = -addr % SZ_1G ?: SZ_1G;
	*count = min_not_zero(blk_offset, size) / SZ_2M;
	return SZ_2M;
}
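/*
 * Worked example (illustrative): for addr = 0x2ff000, -addr % SZ_2M is
 * evaluated in unsigned 64-bit arithmetic as (2^64 - 0x2ff000) % 0x200000
 * = 0x101000, the distance to the next 2 MiB boundary, with no explicit
 * subtraction that could underflow. When addr is already 1 GiB aligned the
 * second computation yields 0, and the GNU "?:" extension substitutes a
 * full SZ_1G so the 2 MiB page count is not clamped to zero.
 */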
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_autosuspend(pfdev->dev);
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx",
			mmu->as, iova, paddr, len);

		while (len) {
			size_t pgcount, mapped = 0;
			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
				       GFP_KERNEL, &mapped);
			/* Don't get stuck if things have gone wrong */
			mapped = max(mapped, pgsize);
			iova += mapped;
			paddr += mapped;
			len -= mapped;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}
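/*
 * Worked example (illustrative): mapping len = 0x402000 at iova = 0x1ff000
 * (assuming paddr has the same 2 MiB misalignment) splits into three
 * map_pages() calls via get_pgsize():
 *   1. 1 x 4 KiB page  at 0x1ff000, up to the 2 MiB boundary at 0x200000
 *   2. 2 x 2 MiB pages at 0x200000
 *   3. 1 x 4 KiB page  at 0x600000, covering the 0x1000 remainder
 */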
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_shmem_object *shmem = &bo->base;
	struct drm_gem_object *obj = &shmem->base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	/* ... */
	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page, pgcount;
		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

		if (bo->is_heap)
			pgcount = 1;
		if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
			WARN_ON(unmapped_page != pgsize * pgcount);
		}
		iova += pgsize * pgcount;
		unmapped_len += pgsize * pgcount;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}
static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	/* ... */
}
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:
	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			/* ... */
			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	struct drm_gem_object *obj;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	obj = &bo->base.base;

	dma_resv_lock(obj->resv, NULL);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			ret = -ENOMEM;
			goto err_unlock;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			ret = -ENOMEM;
			goto err_unlock;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		/* ... */
	}

	mapping = bo->base.base.filp->f_mapping;
	/* ... */

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	/* ... */

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;
	bo->heap_rss_size += SZ_2M;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

	dma_resv_unlock(obj->resv);

	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	/* ... */
err_unlock:
	dma_resv_unlock(obj->resv);
err_bo:
	panfrost_gem_mapping_put(bomapping);
	return ret;
}
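/*
 * Worked example (illustrative): a heap fault at GPU VA 0x8401234 is
 * rounded down to its 2 MiB chunk by addr &= ~(SZ_2M - 1), giving
 * 0x8400000. With 4 KiB pages, page_offset = (0x8400000 >> 12) minus the
 * node start, and sgts[page_offset / 512] selects the chunk's sg_table,
 * since SZ_2M / PAGE_SIZE = 512 pages per chunk.
 */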
static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}
void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}
struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}
#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}
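/*
 * Worked example (illustrative, 4 KiB pages): PFN_4G = 0x100000 and
 * PFN_16M = 0x1000. For an executable range starting at PFN 0xff800
 * (8 MiB below a 4 GiB boundary), next_seg - *start = 0x800 <= PFN_16M,
 * so *start is pushed past the boundary to 0x100001; the final min()
 * then clips *end to 0x1fffff, keeping the whole allocation inside one
 * 4 GiB segment so an executable buffer never straddles the boundary.
 */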
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}
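/*
 * Worked example (illustrative): a GPU reporting mmu_features = 0x2830
 * decodes as ias = FIELD_GET(0xff, 0x2830) = 0x30 = 48 input address bits
 * and oas = FIELD_GET(0xff00, 0x2830) = 0x28 = 40 output address bits;
 * FIELD_GET() masks the value and shifts by the mask's trailing zero count.
 */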
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
		return IRQ_NONE;

	/* ... */
}
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 exception_type;
		/* ... read AS_FAULTADDRESS/AS_FAULTSTATUS and decode fields ... */

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret) {
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				/* ... decoded fault status lines ... */,
				as, addr /* ... */);

			spin_lock(&pfdev->as_lock);
			/* Ignore MMU interrupts on this AS until it's been
			 * re-enabled.
			 */
			pfdev->as_faulty_mask |= mask;

			/* Disable the MMU to kill jobs on this AS. */
			panfrost_mmu_disable(pfdev, as);
			spin_unlock(&pfdev->as_lock);
		}

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
	}

	if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
		spin_lock(&pfdev->as_lock);
		mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
		spin_unlock(&pfdev->as_lock);
	}

	return IRQ_HANDLED;
}
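/*
 * Worked example (illustrative): MMU_INT_RAWSTAT keeps the page-fault bit
 * for AS n in bit n and the bus-fault bit in bit n + 16. For
 * status = 0x00040008 (page fault on AS3, bus fault on AS2),
 * status | (status >> 16) = 0x0004000c and ffs(...) - 1 = 2, so the lowest
 * faulting AS is handled first; mask = BIT(2) | BIT(18) then clears both
 * views of AS2 before the loop moves on to AS3.
 */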
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err;

	pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (pfdev->mmu_irq < 0)
		return pfdev->mmu_irq;

	err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}
void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
{
	set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

	mmu_write(pfdev, MMU_INT_MASK, 0);
	synchronize_irq(pfdev->mmu_irq);
}