Lines Matching full:vm

45 	/** @vm: VM bound to this slot. NULL if no VM is bound. */
46 struct panthor_vm *vm; member
77 * We use this list to pick a VM to evict when all slots are
88 /** @vm: VMs management fields */
101 } vm; member
105 * struct panthor_vm_pool - VM pool object
108 /** @xa: Array used for VM handle tracking. */
133 * struct panthor_vm_op_ctx - VM operation context
135 * With VM operations potentially taking place in a dma-signaling path, we
155 * After a VM operation, there might be free pages left in this array.
173 /** @va: Virtual range targeted by the VM operation. */
183 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
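
The op_ctx comments above describe why the driver front-loads its allocations: once a VM operation runs in a dma-fence signaling path it must not allocate memory, so everything (page-table pages included) is reserved while preparing the operation and only consumed while executing it. A minimal sketch of that reserve-then-consume pattern, with hypothetical names (pt_reservation, prepare_pt_reservation, consume_pt are illustrations, not the driver's API):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* Illustration only: reserve worst-case page-table pages while sleeping
     * allocations are still allowed, then consume them from the
     * fence-signaling path without ever calling the allocator there. */
    struct pt_reservation {
            struct page **pages;
            u32 count;      /* number of pages reserved */
            u32 ptr;        /* next unused page */
    };

    static int prepare_pt_reservation(struct pt_reservation *rsvd, u32 worst_case)
    {
            rsvd->pages = kcalloc(worst_case, sizeof(*rsvd->pages), GFP_KERNEL);
            if (!rsvd->pages)
                    return -ENOMEM;

            for (rsvd->count = 0; rsvd->count < worst_case; rsvd->count++) {
                    rsvd->pages[rsvd->count] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                    if (!rsvd->pages[rsvd->count])
                            return -ENOMEM; /* caller releases what was reserved */
            }
            rsvd->ptr = 0;
            return 0;
    }

    /* Run side (dma-fence signaling path): consumption only, no allocation. */
    static struct page *consume_pt(struct pt_reservation *rsvd)
    {
            if (WARN_ON(rsvd->ptr >= rsvd->count))
                    return NULL;
            return rsvd->pages[rsvd->ptr++];
    }
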
220 * struct panthor_vm - VM object
222 * A VM is an object representing a GPU (or MCU) virtual address space.
225 * the VM.
227 * Except for the MCU VM, which is managed by the kernel, all other VMs are
255 * There's currently one bind queue per VM. It doesn't make sense to
256 * allow more given the VM operations are serialized anyway.
273 * @op_lock: Lock used to serialize operations on a VM.
282 * @op_ctx: The context attached to the currently executing VM operation.
294 * For the MCU VM, this manages the VA range that's used to map
317 * @id: ID of the address space this VM is bound to.
319 * A value of -1 means the VM is inactive/not bound.
323 /** @active_cnt: Number of active users of this VM. */
327 * @lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
339 * @pool: The heap pool attached to this VM.
341 * Will stay NULL until someone creates a heap context on this VM.
349 /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
352 /** @for_mcu: True if this is the MCU VM. */
356 * @destroyed: True if the VM was destroyed.
358 * No further bind requests should be queued to a destroyed VM.
363 * @unusable: True if the VM has turned unusable because something
371 * Instead, we should just flag the VM as unusable, and fail any
372 * further request targeting this VM.
374 * We also provide a way to query a VM state, so userspace can destroy
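
A short hedged sketch of how the destroyed/unusable flags are meant to be consumed (it mirrors the check visible later in the bind-job creation path; queue_bind_request is an illustrative name, not a driver function):

    /* Illustration: refuse new work on a VM that was destroyed or that became
     * unusable after a failed asynchronous operation. */
    static int queue_bind_request(struct panthor_vm *vm)
    {
            if (vm->destroyed || vm->unusable)
                    return -EINVAL;

            /* ... build and queue the VM_BIND request ... */
            return 0;
    }
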
392 * struct panthor_vm_bind_job - VM bind job
401 /** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
404 /** @vm: VM targeted by the VM operation. */
405 struct panthor_vm *vm; member
430 * done to allow asynchronous VM operations.
437 struct panthor_vm *vm = cookie; in alloc_pt() local
441 if (unlikely(!vm->root_page_table)) { in alloc_pt()
444 drm_WARN_ON(&vm->ptdev->base, vm->op_ctx); in alloc_pt()
445 p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev), in alloc_pt()
448 vm->root_page_table = page; in alloc_pt()
455 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in alloc_pt()
458 /* We must have some op_ctx attached to the VM and it must have at least one in alloc_pt()
461 if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) || in alloc_pt()
462 drm_WARN_ON(&vm->ptdev->base, in alloc_pt()
463 vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count)) in alloc_pt()
466 page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++]; in alloc_pt()
489 struct panthor_vm *vm = cookie; in free_pt() local
491 if (unlikely(vm->root_page_table == data)) { in free_pt()
493 vm->root_page_table = NULL; in free_pt()
497 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in free_pt()
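
alloc_pt()/free_pt() above look like custom page-table allocator hooks handed to the io-pgtable layer, with the VM acting as the cookie passed back into them. A hedged sketch of that wiring, assuming io_pgtable_cfg exposes .alloc/.free callbacks (as recent kernels do) and using placeholders for the address-space sizes (va_bits, pa_bits) and page-size mask:

    /* Sketch: register custom allocators; 'vm' comes back as the cookie in
     * alloc_pt()/free_pt(). Field names assume the custom-allocator support
     * in recent io-pgtable versions. */
    struct io_pgtable_cfg pgtbl_cfg = {
            .pgsize_bitmap  = SZ_4K | SZ_2M,        /* illustrative */
            .ias            = va_bits,              /* placeholder */
            .oas            = pa_bits,              /* placeholder */
            .coherent_walk  = ptdev->coherent,
            .alloc          = alloc_pt,
            .free           = free_pt,
            .iommu_dev      = ptdev->base.dev,
    };

    vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
    if (!vm->pgtbl_ops)
            return -EINVAL;
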
595 static int mmu_hw_do_operation(struct panthor_vm *vm, in mmu_hw_do_operation() argument
598 struct panthor_device *ptdev = vm->ptdev; in mmu_hw_do_operation()
602 ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op); in mmu_hw_do_operation()
661 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
662 * @vm: VM to check.
664 * Return: true if the VM has unhandled faults, false otherwise.
666 bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm) in panthor_vm_has_unhandled_faults() argument
668 return vm->unhandled_fault; in panthor_vm_has_unhandled_faults()
672 * panthor_vm_is_unusable() - Check if the VM is still usable
673 * @vm: VM to check.
675 * Return: true if the VM is unusable, false otherwise.
677 bool panthor_vm_is_unusable(struct panthor_vm *vm) in panthor_vm_is_unusable() argument
679 return vm->unusable; in panthor_vm_is_unusable()
682 static void panthor_vm_release_as_locked(struct panthor_vm *vm) in panthor_vm_release_as_locked() argument
684 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_release_as_locked()
688 if (drm_WARN_ON(&ptdev->base, vm->as.id < 0)) in panthor_vm_release_as_locked()
691 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_release_as_locked()
692 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_release_as_locked()
693 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_release_as_locked()
694 list_del_init(&vm->as.lru_node); in panthor_vm_release_as_locked()
695 vm->as.id = -1; in panthor_vm_release_as_locked()
699 * panthor_vm_active() - Flag a VM as active
700 * @vm: VM to flag as active.
702 * Assigns an address space to a VM so it can be used by the GPU/MCU.
706 int panthor_vm_active(struct panthor_vm *vm) in panthor_vm_active() argument
708 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_active()
710 struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg; in panthor_vm_active()
717 if (refcount_inc_not_zero(&vm->as.active_cnt)) in panthor_vm_active()
722 if (refcount_inc_not_zero(&vm->as.active_cnt)) in panthor_vm_active()
725 as = vm->as.id; in panthor_vm_active()
737 if (vm->for_mcu) { in panthor_vm_active()
761 vm->as.id = as; in panthor_vm_active()
763 ptdev->mmu->as.slots[as].vm = vm; in panthor_vm_active()
774 /* If the VM is re-activated, we clear the fault. */ in panthor_vm_active()
775 vm->unhandled_fault = false; in panthor_vm_active()
786 ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr); in panthor_vm_active()
790 refcount_set(&vm->as.active_cnt, 1); in panthor_vm_active()
791 list_del_init(&vm->as.lru_node); in panthor_vm_active()
803 * panthor_vm_idle() - Flag a VM idle
804 * @vm: VM to flag as idle.
806 * When we know the GPU is done with the VM (no more jobs to process),
807 * we can relinquish the AS slot attached to this VM, if any.
809 * We don't release the slot immediately, but instead place the VM in
810 * the LRU list, so it can be evicted if another VM needs an AS slot.
815 void panthor_vm_idle(struct panthor_vm *vm) in panthor_vm_idle() argument
817 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_idle()
819 if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock)) in panthor_vm_idle()
822 if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node))) in panthor_vm_idle()
823 list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list); in panthor_vm_idle()
825 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_idle()
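
panthor_vm_active() and panthor_vm_idle() bracket GPU use of the address space: the former assigns (or revives) an AS slot, the latter puts the VM on the LRU list so the slot can be reclaimed by another VM. A hedged usage sketch (run_job_on_vm is illustrative; only the active/idle pairing is the point):

    /* Illustration: keep the VM bound to an AS slot while the GPU needs it. */
    static int run_job_on_vm(struct panthor_vm *vm)
    {
            int ret;

            ret = panthor_vm_active(vm);    /* grab or re-use an AS slot */
            if (ret)
                    return ret;

            /* ... submit and wait for the GPU work using this VM ... */

            panthor_vm_idle(vm);            /* slot kept, but now evictable */
            return 0;
    }
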
829 u32 panthor_vm_page_size(struct panthor_vm *vm) in panthor_vm_page_size() argument
831 const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops); in panthor_vm_page_size()
837 static void panthor_vm_stop(struct panthor_vm *vm) in panthor_vm_stop() argument
839 drm_sched_stop(&vm->sched, NULL); in panthor_vm_stop()
842 static void panthor_vm_start(struct panthor_vm *vm) in panthor_vm_start() argument
844 drm_sched_start(&vm->sched, 0); in panthor_vm_start()
848 * panthor_vm_as() - Get the AS slot attached to a VM
849 * @vm: VM to get the AS slot of.
851 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
853 int panthor_vm_as(struct panthor_vm *vm) in panthor_vm_as() argument
855 return vm->as.id; in panthor_vm_as()
879 static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size) in panthor_vm_flush_range() argument
881 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_flush_range()
884 if (vm->as.id < 0) in panthor_vm_flush_range()
891 ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT); in panthor_vm_flush_range()
898 * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
899 * @vm: VM whose cache to flush
903 int panthor_vm_flush_all(struct panthor_vm *vm) in panthor_vm_flush_all() argument
905 return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range); in panthor_vm_flush_all()
908 static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) in panthor_vm_unmap_pages() argument
910 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_unmap_pages()
911 struct io_pgtable_ops *ops = vm->pgtbl_ops; in panthor_vm_unmap_pages()
914 drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); in panthor_vm_unmap_pages()
927 panthor_vm_flush_range(vm, iova, offset + unmapped_sz); in panthor_vm_unmap_pages()
933 return panthor_vm_flush_range(vm, iova, size); in panthor_vm_unmap_pages()
937 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, in panthor_vm_map_pages() argument
940 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_map_pages()
943 struct io_pgtable_ops *ops = vm->pgtbl_ops; in panthor_vm_map_pages()
965 vm->as.id, iova, &paddr, len); in panthor_vm_map_pages()
985 panthor_vm_unmap_pages(vm, start_iova, in panthor_vm_map_pages()
997 return panthor_vm_flush_range(vm, start_iova, iova - start_iova); in panthor_vm_map_pages()
1020 * @vm: VM to allocate a region on.
1027 * need to be mapped to the userspace VM, in the region reserved for kernel
1035 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, in panthor_vm_alloc_va() argument
1038 ssize_t vm_pgsz = panthor_vm_page_size(vm); in panthor_vm_alloc_va()
1047 mutex_lock(&vm->mm_lock); in panthor_vm_alloc_va()
1051 ret = drm_mm_reserve_node(&vm->mm, va_node); in panthor_vm_alloc_va()
1053 ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size, in panthor_vm_alloc_va()
1055 0, vm->kernel_auto_va.start, in panthor_vm_alloc_va()
1056 vm->kernel_auto_va.end, in panthor_vm_alloc_va()
1059 mutex_unlock(&vm->mm_lock); in panthor_vm_alloc_va()
1066 * @vm: VM to free the region on.
1069 void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node) in panthor_vm_free_va() argument
1071 mutex_lock(&vm->mm_lock); in panthor_vm_free_va()
1073 mutex_unlock(&vm->mm_lock); in panthor_vm_free_va()
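
panthor_vm_alloc_va() either reserves the exact VA the caller asked for or lets drm_mm pick one from the kernel auto-VA window, and panthor_vm_free_va() returns the region. A hedged usage sketch, assuming the last parameter is the caller-owned drm_mm_node and that va == 0 requests automatic placement (that convention isn't visible in these matches):

    /* Illustration: carve a kernel-VA region for a kernel-only mapping. */
    struct drm_mm_node va_node = {};
    int ret;

    ret = panthor_vm_alloc_va(vm, 0, SZ_64K, &va_node);     /* auto VA assumed */
    if (ret)
            return ret;

    /* ... map the object at va_node.start ... */

    panthor_vm_free_va(vm, &va_node);
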
1079 struct drm_gpuvm *vm = vm_bo->vm; in panthor_vm_bo_put() local
1084 * Same goes for the VM, since we take the VM resv lock. in panthor_vm_bo_put()
1087 drm_gpuvm_get(vm); in panthor_vm_bo_put()
1096 dma_resv_lock(drm_gpuvm_resv(vm), NULL); in panthor_vm_bo_put()
1100 dma_resv_unlock(drm_gpuvm_resv(vm)); in panthor_vm_bo_put()
1108 drm_gpuvm_put(vm); in panthor_vm_bo_put()
1113 struct panthor_vm *vm) in panthor_vm_cleanup_op_ctx() argument
1199 struct panthor_vm *vm, in panthor_vm_prepare_map_op_ctx() argument
1221 /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */ in panthor_vm_prepare_map_op_ctx()
1223 bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm)) in panthor_vm_prepare_map_op_ctx()
1256 preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base); in panthor_vm_prepare_map_op_ctx()
1266 * pre-allocated BO if the <BO,VM> association exists. Given we in panthor_vm_prepare_map_op_ctx()
1268 * be called immediately, and we have to hold the VM resv lock when in panthor_vm_prepare_map_op_ctx()
1271 dma_resv_lock(panthor_vm_resv(vm), NULL); in panthor_vm_prepare_map_op_ctx()
1275 dma_resv_unlock(panthor_vm_resv(vm)); in panthor_vm_prepare_map_op_ctx()
1277 /* If a vm_bo for this <VM,BO> combination exists, it already in panthor_vm_prepare_map_op_ctx()
1315 dma_resv_lock(panthor_vm_resv(vm), NULL); in panthor_vm_prepare_map_op_ctx()
1317 dma_resv_unlock(panthor_vm_resv(vm)); in panthor_vm_prepare_map_op_ctx()
1322 panthor_vm_cleanup_op_ctx(op_ctx, vm); in panthor_vm_prepare_map_op_ctx()
1327 struct panthor_vm *vm, in panthor_vm_prepare_unmap_op_ctx() argument
1374 panthor_vm_cleanup_op_ctx(op_ctx, vm); in panthor_vm_prepare_unmap_op_ctx()
1379 struct panthor_vm *vm) in panthor_vm_prepare_sync_only_op_ctx() argument
1388 * @vm: VM to look into.
1401 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset) in panthor_vm_get_bo_for_va() argument
1407 /* Take the VM lock to prevent concurrent map/unmap operations. */ in panthor_vm_get_bo_for_va()
1408 mutex_lock(&vm->op_lock); in panthor_vm_get_bo_for_va()
1409 gpuva = drm_gpuva_find_first(&vm->base, va, 1); in panthor_vm_get_bo_for_va()
1416 mutex_unlock(&vm->op_lock); in panthor_vm_get_bo_for_va()
1437 /* If the task VM size is smaller than the GPU VA range, pick this in panthor_vm_create_get_user_va_range()
1443 /* If the GPU VA range is smaller than the task VM size, we in panthor_vm_create_get_user_va_range()
1494 * panthor_vm_pool_create_vm() - Create a VM
1495 * @pool: The VM pool to create this VM on.
1499 * Return: a positive VM ID on success, a negative error code otherwise.
1506 struct panthor_vm *vm; in panthor_vm_pool_create_vm() local
1514 vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range, in panthor_vm_pool_create_vm()
1516 if (IS_ERR(vm)) in panthor_vm_pool_create_vm()
1517 return PTR_ERR(vm); in panthor_vm_pool_create_vm()
1519 ret = xa_alloc(&pool->xa, &id, vm, in panthor_vm_pool_create_vm()
1523 panthor_vm_put(vm); in panthor_vm_pool_create_vm()
1531 static void panthor_vm_destroy(struct panthor_vm *vm) in panthor_vm_destroy() argument
1533 if (!vm) in panthor_vm_destroy()
1536 vm->destroyed = true; in panthor_vm_destroy()
1538 mutex_lock(&vm->heaps.lock); in panthor_vm_destroy()
1539 panthor_heap_pool_destroy(vm->heaps.pool); in panthor_vm_destroy()
1540 vm->heaps.pool = NULL; in panthor_vm_destroy()
1541 mutex_unlock(&vm->heaps.lock); in panthor_vm_destroy()
1543 drm_WARN_ON(&vm->ptdev->base, in panthor_vm_destroy()
1544 panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range)); in panthor_vm_destroy()
1545 panthor_vm_put(vm); in panthor_vm_destroy()
1549 * panthor_vm_pool_destroy_vm() - Destroy a VM.
1550 * @pool: VM pool.
1551 * @handle: VM handle.
1553 * This function doesn't free the VM object or its resources; it just kills
1559 * The VM resources are freed when the last reference on the VM object is
1564 struct panthor_vm *vm; in panthor_vm_pool_destroy_vm() local
1566 vm = xa_erase(&pool->xa, handle); in panthor_vm_pool_destroy_vm()
1568 panthor_vm_destroy(vm); in panthor_vm_pool_destroy_vm()
1570 return vm ? 0 : -EINVAL; in panthor_vm_pool_destroy_vm()
1574 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
1575 * @pool: VM pool to check.
1576 * @handle: Handle of the VM to retrieve.
1578 * Return: A valid pointer if the VM exists, NULL otherwise.
1583 struct panthor_vm *vm; in panthor_vm_pool_get_vm() local
1586 vm = panthor_vm_get(xa_load(&pool->xa, handle)); in panthor_vm_pool_get_vm()
1589 return vm; in panthor_vm_pool_get_vm()
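
The VM pool is essentially an xarray keyed by the userspace handle: xa_alloc() hands out the ID at creation time, xa_load() plus a reference get resolves a handle, and xa_erase() on destroy drops the pool's reference so the VM is freed once its last user is gone. A simplified, hedged sketch of that handle scheme (limits and locking details differ in the driver):

    #include <linux/xarray.h>

    struct obj_pool {
            struct xarray xa;
    };

    static void pool_init(struct obj_pool *pool)
    {
            /* XA_FLAGS_ALLOC1 keeps handle 0 free to mean "no handle". */
            xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
    }

    static int pool_add(struct obj_pool *pool, struct panthor_vm *vm, u32 *handle)
    {
            /* xa_limit_32b is a stand-in; the driver applies its own limit. */
            return xa_alloc(&pool->xa, handle, vm, xa_limit_32b, GFP_KERNEL);
    }

    static struct panthor_vm *pool_get(struct obj_pool *pool, u32 handle)
    {
            struct panthor_vm *vm;

            xa_lock(&pool->xa);
            vm = panthor_vm_get(xa_load(&pool->xa, handle));
            xa_unlock(&pool->xa);
            return vm;
    }

    static int pool_remove(struct obj_pool *pool, u32 handle)
    {
            struct panthor_vm *vm = xa_erase(&pool->xa, handle);

            if (!vm)
                    return -EINVAL;
            panthor_vm_put(vm);     /* drop the pool's reference */
            return 0;
    }
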
1593 * panthor_vm_pool_destroy() - Destroy a VM pool.
1603 struct panthor_vm *vm; in panthor_vm_pool_destroy() local
1609 xa_for_each(&pfile->vms->xa, i, vm) in panthor_vm_pool_destroy()
1610 panthor_vm_destroy(vm); in panthor_vm_pool_destroy()
1617 * panthor_vm_pool_create() - Create a VM pool
1715 if (ptdev->mmu->as.slots[as].vm) in panthor_mmu_irq_handler()
1716 ptdev->mmu->as.slots[as].vm->unhandled_fault = true; in panthor_mmu_irq_handler()
1745 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_suspend() local
1747 if (vm) { in panthor_mmu_suspend()
1749 panthor_vm_release_as_locked(vm); in panthor_mmu_suspend()
1781 * don't get asked to do a VM operation while the GPU is down.
1788 struct panthor_vm *vm; in panthor_mmu_pre_reset() local
1792 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1793 ptdev->mmu->vm.reset_in_progress = true; in panthor_mmu_pre_reset()
1794 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) in panthor_mmu_pre_reset()
1795 panthor_vm_stop(vm); in panthor_mmu_pre_reset()
1796 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1808 struct panthor_vm *vm; in panthor_mmu_post_reset() local
1819 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_post_reset() local
1821 if (vm) in panthor_mmu_post_reset()
1822 panthor_vm_release_as_locked(vm); in panthor_mmu_post_reset()
1830 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1831 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in panthor_mmu_post_reset()
1832 panthor_vm_start(vm); in panthor_mmu_post_reset()
1834 ptdev->mmu->vm.reset_in_progress = false; in panthor_mmu_post_reset()
1835 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1840 struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base); in panthor_vm_free() local
1841 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_free()
1843 mutex_lock(&vm->heaps.lock); in panthor_vm_free()
1844 if (drm_WARN_ON(&ptdev->base, vm->heaps.pool)) in panthor_vm_free()
1845 panthor_heap_pool_destroy(vm->heaps.pool); in panthor_vm_free()
1846 mutex_unlock(&vm->heaps.lock); in panthor_vm_free()
1847 mutex_destroy(&vm->heaps.lock); in panthor_vm_free()
1849 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1850 list_del(&vm->node); in panthor_vm_free()
1856 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_free()
1857 panthor_vm_start(vm); in panthor_vm_free()
1858 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1860 drm_sched_entity_destroy(&vm->entity); in panthor_vm_free()
1861 drm_sched_fini(&vm->sched); in panthor_vm_free()
1864 if (vm->as.id >= 0) { in panthor_vm_free()
1868 panthor_mmu_as_disable(ptdev, vm->as.id); in panthor_vm_free()
1872 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_free()
1873 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_free()
1874 list_del(&vm->as.lru_node); in panthor_vm_free()
1878 free_io_pgtable_ops(vm->pgtbl_ops); in panthor_vm_free()
1880 drm_mm_takedown(&vm->mm); in panthor_vm_free()
1881 kfree(vm); in panthor_vm_free()
1885 * panthor_vm_put() - Release a reference on a VM
1886 * @vm: VM to release the reference on. Can be NULL.
1888 void panthor_vm_put(struct panthor_vm *vm) in panthor_vm_put() argument
1890 drm_gpuvm_put(vm ? &vm->base : NULL); in panthor_vm_put()
1894 * panthor_vm_get() - Get a VM reference
1895 * @vm: VM to get the reference on. Can be NULL.
1897 * Return: @vm value.
1899 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm) in panthor_vm_get() argument
1901 if (vm) in panthor_vm_get()
1902 drm_gpuvm_get(&vm->base); in panthor_vm_get()
1904 return vm; in panthor_vm_get()
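
panthor_vm_get()/panthor_vm_put() are thin, NULL-tolerant wrappers around the underlying drm_gpuvm reference count. Typical usage, as seen with the bind job's vm pointer:

    /* Illustration: hold a VM reference for as long as an object needs it. */
    job->vm = panthor_vm_get(vm);   /* NULL-safe: returns NULL if vm is NULL */

    /* ... job lifetime ... */

    panthor_vm_put(job->vm);        /* NULL-safe as well */
    job->vm = NULL;
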
1908 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
1909 * @vm: VM to query the heap pool on.
1912 * Heap pools are per-VM. This function allows one to retrieve the heap pool
1913 * attached to a VM.
1921 struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create) in panthor_vm_get_heap_pool() argument
1925 mutex_lock(&vm->heaps.lock); in panthor_vm_get_heap_pool()
1926 if (!vm->heaps.pool && create) { in panthor_vm_get_heap_pool()
1927 if (vm->destroyed) in panthor_vm_get_heap_pool()
1930 pool = panthor_heap_pool_create(vm->ptdev, vm); in panthor_vm_get_heap_pool()
1933 vm->heaps.pool = panthor_heap_pool_get(pool); in panthor_vm_get_heap_pool()
1935 pool = panthor_heap_pool_get(vm->heaps.pool); in panthor_vm_get_heap_pool()
1939 mutex_unlock(&vm->heaps.lock); in panthor_vm_get_heap_pool()
1946 * heaps over all the heap pools in a VM
1950 * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
1955 struct panthor_vm *vm; in panthor_vm_heaps_sizes() local
1962 xa_for_each(&pfile->vms->xa, i, vm) { in panthor_vm_heaps_sizes()
1963 size_t size = panthor_heap_pool_size(vm->heaps.pool); in panthor_vm_heaps_sizes()
1965 if (vm->as.id >= 0) in panthor_vm_heaps_sizes()
2013 static void panthor_vma_link(struct panthor_vm *vm, in panthor_vma_link() argument
2021 drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); in panthor_vma_link()
2025 static void panthor_vma_unlink(struct panthor_vm *vm, in panthor_vma_unlink() argument
2040 list_add_tail(&vma->node, &vm->op_ctx->returned_vmas); in panthor_vma_unlink()
2056 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_map() local
2057 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; in panthor_gpuva_sm_step_map()
2066 ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags), in panthor_gpuva_sm_step_map()
2075 drm_gpuva_map(&vm->base, &vma->base, &op->map); in panthor_gpuva_sm_step_map()
2076 panthor_vma_link(vm, vma, op_ctx->map.vm_bo); in panthor_gpuva_sm_step_map()
2085 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_remap() local
2086 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; in panthor_gpuva_sm_step_remap()
2092 ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range); in panthor_gpuva_sm_step_remap()
2116 panthor_vma_link(vm, prev_vma, in panthor_gpuva_sm_step_remap()
2121 panthor_vma_link(vm, next_vma, in panthor_gpuva_sm_step_remap()
2125 panthor_vma_unlink(vm, unmap_vma); in panthor_gpuva_sm_step_remap()
2133 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_unmap() local
2136 ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr, in panthor_gpuva_sm_step_unmap()
2138 if (drm_WARN_ON(&vm->ptdev->base, ret)) in panthor_gpuva_sm_step_unmap()
2142 panthor_vma_unlink(vm, unmap_vma); in panthor_gpuva_sm_step_unmap()
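
The sm_step_map/remap/unmap helpers above are the callbacks drm_gpuvm's split/merge state machine invokes while walking a map or unmap request, and panthor_vm_free() is the matching VM destructor. A hedged sketch of how such an ops table is typically wired up (the table name is illustrative; field names follow the drm_gpuvm API as documented in drm_gpuvm.h):

    /* Sketch: drm_gpuvm operations table driving the callbacks above. */
    static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
            .vm_free        = panthor_vm_free,
            .sm_step_map    = panthor_gpuva_sm_step_map,
            .sm_step_remap  = panthor_gpuva_sm_step_remap,
            .sm_step_unmap  = panthor_gpuva_sm_step_unmap,
    };
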
2154 * panthor_vm_resv() - Get the dma_resv object attached to a VM.
2155 * @vm: VM to get the dma_resv of.
2159 struct dma_resv *panthor_vm_resv(struct panthor_vm *vm) in panthor_vm_resv() argument
2161 return drm_gpuvm_resv(&vm->base); in panthor_vm_resv()
2164 struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm) in panthor_vm_root_gem() argument
2166 if (!vm) in panthor_vm_root_gem()
2169 return vm->base.r_obj; in panthor_vm_root_gem()
2173 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, in panthor_vm_exec_op() argument
2182 mutex_lock(&vm->op_lock); in panthor_vm_exec_op()
2183 vm->op_ctx = op; in panthor_vm_exec_op()
2186 if (vm->unusable) { in panthor_vm_exec_op()
2191 ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range, in panthor_vm_exec_op()
2196 ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range); in panthor_vm_exec_op()
2205 vm->unusable = true; in panthor_vm_exec_op()
2207 vm->op_ctx = NULL; in panthor_vm_exec_op()
2208 mutex_unlock(&vm->op_lock); in panthor_vm_exec_op()
2221 * drm_sched finished fence, but we also flag the VM as unusable, because in panthor_vm_bind_run_job()
2222 * a failure in the async VM_BIND results in an inconsistent state. VM needs in panthor_vm_bind_run_job()
2226 ret = panthor_vm_exec_op(job->vm, &job->ctx, true); in panthor_vm_bind_run_job()
2239 panthor_vm_cleanup_op_ctx(&job->ctx, job->vm); in panthor_vm_bind_job_release()
2240 panthor_vm_put(job->vm); in panthor_vm_bind_job_release()
2285 * panthor_vm_create() - Create a VM
2287 * @for_mcu: True if this is the FW MCU VM.
2307 struct panthor_vm *vm; in panthor_vm_create() local
2310 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in panthor_vm_create()
2311 if (!vm) in panthor_vm_create()
2314 /* We allocate a dummy GEM for the VM. */ in panthor_vm_create()
2321 mutex_init(&vm->heaps.lock); in panthor_vm_create()
2322 vm->for_mcu = for_mcu; in panthor_vm_create()
2323 vm->ptdev = ptdev; in panthor_vm_create()
2324 mutex_init(&vm->op_lock); in panthor_vm_create()
2335 mutex_init(&vm->mm_lock); in panthor_vm_create()
2336 drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size); in panthor_vm_create()
2337 vm->kernel_auto_va.start = auto_kernel_va_start; in panthor_vm_create()
2338 vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1; in panthor_vm_create()
2340 INIT_LIST_HEAD(&vm->node); in panthor_vm_create()
2341 INIT_LIST_HEAD(&vm->as.lru_node); in panthor_vm_create()
2342 vm->as.id = -1; in panthor_vm_create()
2343 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_create()
2356 vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm); in panthor_vm_create()
2357 if (!vm->pgtbl_ops) { in panthor_vm_create()
2363 ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq, in panthor_vm_create()
2366 "panthor-vm-bind", ptdev->base.dev); in panthor_vm_create()
2370 sched = &vm->sched; in panthor_vm_create()
2371 ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL); in panthor_vm_create()
2375 mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair; in panthor_vm_create()
2376 vm->memattr = mair_to_memattr(mair, ptdev->coherent); in panthor_vm_create()
2378 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2379 list_add_tail(&vm->node, &ptdev->mmu->vm.list); in panthor_vm_create()
2382 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_create()
2383 panthor_vm_stop(vm); in panthor_vm_create()
2384 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2389 drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM", in panthor_vm_create()
2393 return vm; in panthor_vm_create()
2396 drm_sched_fini(&vm->sched); in panthor_vm_create()
2399 free_io_pgtable_ops(vm->pgtbl_ops); in panthor_vm_create()
2402 drm_mm_takedown(&vm->mm); in panthor_vm_create()
2406 kfree(vm); in panthor_vm_create()
2412 struct panthor_vm *vm, in panthor_vm_bind_prepare_op_ctx() argument
2416 ssize_t vm_pgsz = panthor_vm_page_size(vm); in panthor_vm_bind_prepare_op_ctx()
2427 ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm, in panthor_vm_bind_prepare_op_ctx()
2443 return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size); in panthor_vm_bind_prepare_op_ctx()
2458 panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm); in panthor_vm_bind_prepare_op_ctx()
2477 * @vm: VM targeted by the VM_BIND job.
2478 * @op: VM operation data.
2484 struct panthor_vm *vm, in panthor_vm_bind_job_create() argument
2490 if (!vm) in panthor_vm_bind_job_create()
2493 if (vm->destroyed || vm->unusable) in panthor_vm_bind_job_create()
2500 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx); in panthor_vm_bind_job_create()
2508 job->vm = panthor_vm_get(vm); in panthor_vm_bind_job_create()
2510 ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm); in panthor_vm_bind_job_create()
2526 * Lock and prepare the VM resv.
2538 /* Acquire the VM lock and reserve a slot for this VM bind job. */ in panthor_vm_bind_job_prepare_resvs()
2539 ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1); in panthor_vm_bind_job_prepare_resvs()
2564 drm_gpuvm_resv_add_fence(&job->vm->base, exec, in panthor_vm_bind_job_update_resvs()
2570 void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec, in panthor_vm_update_resvs() argument
2575 drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage); in panthor_vm_update_resvs()
2581 * @vm: VM targeted by the VM operation.
2582 * @op: Data describing the VM operation.
2587 struct panthor_vm *vm, in panthor_vm_bind_exec_sync_op() argument
2600 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx); in panthor_vm_bind_exec_sync_op()
2604 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_bind_exec_sync_op()
2605 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_bind_exec_sync_op()
2611 * panthor_vm_map_bo_range() - Map a GEM object range to a VM
2612 * @vm: VM to map the GEM to.
2625 int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo, in panthor_vm_map_bo_range() argument
2631 ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags); in panthor_vm_map_bo_range()
2635 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_map_bo_range()
2636 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_map_bo_range()
2643 * @vm: VM to unmap the region from.
2652 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size) in panthor_vm_unmap_range() argument
2657 ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size); in panthor_vm_unmap_range()
2661 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_unmap_range()
2662 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_unmap_range()
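
panthor_vm_map_bo_range() and panthor_vm_unmap_range() are the synchronous kernel-side entry points: each builds an op context, executes it immediately, and cleans it up. A hedged usage sketch for a kernel BO (bo_size, gpu_va and map_flags are placeholders; offset and size must be aligned to the VM page size checked by the prepare helpers):

    /* Illustration: map a kernel BO into the VM, then tear the range down. */
    int ret;

    ret = panthor_vm_map_bo_range(vm, bo, 0, bo_size, gpu_va, map_flags);
    if (ret)
            return ret;

    /* ... use the GPU mapping at gpu_va ... */

    ret = panthor_vm_unmap_range(vm, gpu_va, bo_size);
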
2668 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
2670 * @vm: VM targeted by the GPU job.
2673 * GPU jobs assume all BOs bound to the VM at the time the job is submitted
2675 * need to reserve a slot on all BOs mapped to a VM and update this slot with
2680 int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm, in panthor_vm_prepare_mapped_bos_resvs() argument
2685 /* Acquire the VM lock and reserve a slot for this GPU job. */ in panthor_vm_prepare_mapped_bos_resvs()
2686 ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count); in panthor_vm_prepare_mapped_bos_resvs()
2690 return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count); in panthor_vm_prepare_mapped_bos_resvs()
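
panthor_vm_prepare_mapped_bos_resvs() is meant to be called from a drm_exec locking loop, so the VM resv and every external BO resv get locked and have a fence slot reserved before the job's fence is attached. A hedged sketch of that loop (the drm_exec_init() arguments vary slightly between kernel versions):

    /* Sketch: lock the VM and every BO mapped to it, reserving one fence slot. */
    struct drm_exec exec;
    int ret = 0;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
    drm_exec_until_all_locked(&exec) {
            ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, 1);
            drm_exec_retry_on_contention(&exec);
            if (ret)
                    break;
    }

    if (!ret) {
            /* ... attach the job's done fence, e.g. through
             * panthor_vm_update_resvs(vm, &exec, fence, ...), before unlocking. */
    }

    drm_exec_fini(&exec);
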
2707 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_unplug() local
2709 if (vm) { in panthor_mmu_unplug()
2711 panthor_vm_release_as_locked(vm); in panthor_mmu_unplug()
2744 INIT_LIST_HEAD(&mmu->vm.list); in panthor_mmu_init()
2745 ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock); in panthor_mmu_init()
2760 mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0); in panthor_mmu_init()
2761 if (!mmu->vm.wq) in panthor_mmu_init()
2773 return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); in panthor_mmu_init()
2777 static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m) in show_vm_gpuvas() argument
2781 mutex_lock(&vm->op_lock); in show_vm_gpuvas()
2782 ret = drm_debugfs_gpuva_info(m, &vm->base); in show_vm_gpuvas()
2783 mutex_unlock(&vm->op_lock); in show_vm_gpuvas()
2794 struct panthor_vm *vm; in show_each_vm() local
2797 mutex_lock(&ptdev->mmu->vm.lock); in show_each_vm()
2798 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in show_each_vm()
2799 ret = show(vm, m); in show_each_vm()
2805 mutex_unlock(&ptdev->mmu->vm.lock); in show_each_vm()