Lines Matching +full:protect +full:- +full:exec
1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-fence-array.h>
44 return vm->gpuvm.r_obj; in xe_vm_obj()
48 * xe_vma_userptr_check_repin() - Advisory check for repin needed
53 * without the vm->userptr.notifier_lock held. There is no guarantee that the
57 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
61 return mmu_interval_check_retry(&uvma->userptr.notifier, in xe_vma_userptr_check_repin()
62 uvma->userptr.notifier_seq) ? in xe_vma_userptr_check_repin()
63 -EAGAIN : 0; in xe_vma_userptr_check_repin()
68 struct xe_vma *vma = &uvma->vma; in xe_vma_userptr_pin_pages()
70 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages()
72 lockdep_assert_held(&vm->lock); in xe_vma_userptr_pin_pages()
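
An aside on the two userptr helpers above: a minimal caller sketch, assuming both xe_vma_userptr_check_repin() and xe_vma_userptr_pin_pages() take a struct xe_userptr_vma pointer and return 0 or a negative errno (the full prototypes are not part of this listing), and that vm->lock is already held:

	/* Hypothetical caller, not from xe_vm.c: advisory check first, then
	 * repin only when the notifier says the pages may have moved. */
	static int example_revalidate_userptr(struct xe_userptr_vma *uvma)
	{
		int err = xe_vma_userptr_check_repin(uvma);

		if (err == -EAGAIN)
			err = xe_vma_userptr_pin_pages(uvma);

		return err;
	}
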
82 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
85 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in preempt_fences_waiting()
86 if (!q->lr.pfence || in preempt_fences_waiting()
88 &q->lr.pfence->flags)) { in preempt_fences_waiting()
107 lockdep_assert_held(&vm->lock); in alloc_preempt_fences()
110 if (*count >= vm->preempt.num_exec_queues) in alloc_preempt_fences()
113 for (; *count < vm->preempt.num_exec_queues; ++(*count)) { in alloc_preempt_fences()
131 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in wait_for_existing_preempt_fences()
132 if (q->lr.pfence) { in wait_for_existing_preempt_fences()
133 long timeout = dma_fence_wait(q->lr.pfence, false); in wait_for_existing_preempt_fences()
135 /* Only -ETIME on fence indicates VM needs to be killed */ in wait_for_existing_preempt_fences()
136 if (timeout < 0 || q->lr.pfence->error == -ETIME) in wait_for_existing_preempt_fences()
137 return -ETIME; in wait_for_existing_preempt_fences()
139 dma_fence_put(q->lr.pfence); in wait_for_existing_preempt_fences()
140 q->lr.pfence = NULL; in wait_for_existing_preempt_fences()
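
The wait above leans on dma_fence_wait()'s two error channels: a negative return means the wait itself failed, while a fence that did signal reports its own status in fence->error. A generic sketch of checking both, with 'fence' as a placeholder:

	long timeout = dma_fence_wait(fence, false);	/* non-interruptible */

	if (timeout < 0 || fence->error == -ETIME)
		return -ETIME;	/* wait failed, or the fence signalled with -ETIME */
	dma_fence_put(fence);
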
152 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in xe_vm_is_idle()
165 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in arm_preempt_fences()
168 link = list->next; in arm_preempt_fences()
169 xe_assert(vm->xe, link != list); in arm_preempt_fences()
172 q, q->lr.context, in arm_preempt_fences()
173 ++q->lr.seqno); in arm_preempt_fences()
174 dma_fence_put(q->lr.pfence); in arm_preempt_fences()
175 q->lr.pfence = fence; in arm_preempt_fences()
186 if (!vm->preempt.num_exec_queues) in add_preempt_fences()
189 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues); in add_preempt_fences()
193 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in add_preempt_fences()
194 if (q->lr.pfence) { in add_preempt_fences()
195 dma_resv_add_fence(bo->ttm.base.resv, in add_preempt_fences()
196 q->lr.pfence, in add_preempt_fences()
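
The reserve-then-add sequence above is the standard dma-resv discipline: fence slots must be reserved while the object is locked before dma_resv_add_fence() may be called. A minimal generic sketch; the usage flag is illustrative, not taken from the listing:

	int err = dma_resv_reserve_fences(bo->ttm.base.resv, 1);

	if (!err)
		dma_resv_add_fence(bo->ttm.base.resv, fence,
				   DMA_RESV_USAGE_BOOKKEEP);
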
204 struct drm_exec *exec) in resume_and_reinstall_preempt_fences() argument
208 lockdep_assert_held(&vm->lock); in resume_and_reinstall_preempt_fences()
211 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in resume_and_reinstall_preempt_fences()
212 q->ops->resume(q); in resume_and_reinstall_preempt_fences()
214 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence, in resume_and_reinstall_preempt_fences()
222 .vm = &vm->gpuvm, in xe_vm_add_compute_exec_queue()
226 struct drm_exec *exec = &vm_exec.exec; in xe_vm_add_compute_exec_queue() local
231 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in xe_vm_add_compute_exec_queue()
233 down_write(&vm->lock); in xe_vm_add_compute_exec_queue()
238 pfence = xe_preempt_fence_create(q, q->lr.context, in xe_vm_add_compute_exec_queue()
239 ++q->lr.seqno); in xe_vm_add_compute_exec_queue()
241 err = -ENOMEM; in xe_vm_add_compute_exec_queue()
245 list_add(&q->lr.link, &vm->preempt.exec_queues); in xe_vm_add_compute_exec_queue()
246 ++vm->preempt.num_exec_queues; in xe_vm_add_compute_exec_queue()
247 q->lr.pfence = pfence; in xe_vm_add_compute_exec_queue()
249 down_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
251 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, in xe_vm_add_compute_exec_queue()
263 up_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
266 drm_exec_fini(exec); in xe_vm_add_compute_exec_queue()
268 up_write(&vm->lock); in xe_vm_add_compute_exec_queue()
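
drm_gpuvm_resv_add_fence(), used above to install the new preempt fence, takes the gpuvm, the drm_exec transaction and the fence, followed by two dma_resv usage values (one for VM-private objects, one for external ones); the trailing arguments are elided by this listing. A hedged sketch of a complete call, with both usages chosen purely for illustration:

	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
				 DMA_RESV_USAGE_BOOKKEEP,	/* private objects */
				 DMA_RESV_USAGE_BOOKKEEP);	/* external objects */
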
274 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
285 down_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
286 if (!list_empty(&q->lr.link)) { in xe_vm_remove_compute_exec_queue()
287 list_del_init(&q->lr.link); in xe_vm_remove_compute_exec_queue()
288 --vm->preempt.num_exec_queues; in xe_vm_remove_compute_exec_queue()
290 if (q->lr.pfence) { in xe_vm_remove_compute_exec_queue()
291 dma_fence_enable_sw_signaling(q->lr.pfence); in xe_vm_remove_compute_exec_queue()
292 dma_fence_put(q->lr.pfence); in xe_vm_remove_compute_exec_queue()
293 q->lr.pfence = NULL; in xe_vm_remove_compute_exec_queue()
295 up_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
299 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
304 * and provides a release-type barrier on the userptr.notifier_lock after
307 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
311 lockdep_assert_held_read(&vm->userptr.notifier_lock); in __xe_vm_userptr_needs_repin()
313 return (list_empty(&vm->userptr.repin_list) && in __xe_vm_userptr_needs_repin()
314 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in __xe_vm_userptr_needs_repin()
320 * xe_vm_kill() - VM Kill
322 * @unlocked: Flag indicating the VM's dma-resv is not held
325 * use. If in preempt fence mode, also kill all exec queues attached to the VM.
331 lockdep_assert_held(&vm->lock); in xe_vm_kill()
336 vm->flags |= XE_VM_FLAG_BANNED; in xe_vm_kill()
339 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in xe_vm_kill()
340 q->ops->kill(q); in xe_vm_kill()
349 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
350 * @exec: The drm_exec object used for locking before validation.
356 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
365 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end) in xe_vm_validate_should_retry() argument
369 if (err != -ENOMEM) in xe_vm_validate_should_retry()
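
The helper above only answers "should I retry"; the caller still owns the loop and the drm_exec transaction. A sketch of the resulting shape, where lock_and_validate() is a hypothetical wrapper that inits, runs and finis one drm_exec pass:

	struct drm_exec exec;
	ktime_t end = 0;	/* must start at 0; the helper arms the deadline */
	int err;

	do {
		err = lock_and_validate(&exec);
	} while (err && xe_vm_validate_should_retry(&exec, err, &end));
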
381 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) in xe_gpuvm_validate() argument
383 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_gpuvm_validate()
387 lockdep_assert_held(&vm->lock); in xe_gpuvm_validate()
389 list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind, in xe_gpuvm_validate()
390 &vm->rebind_list); in xe_gpuvm_validate()
392 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false); in xe_gpuvm_validate()
396 vm_bo->evicted = false; in xe_gpuvm_validate()
401 * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
403 * @exec: The struct drm_exec with the locked GEM objects.
408 * rebindings may cause evictions and hence the validation-rebind
412 * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
415 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, in xe_vm_validate_rebind() argument
423 ret = drm_gpuvm_validate(&vm->gpuvm, exec); in xe_vm_validate_rebind()
430 } while (!list_empty(&vm->gpuvm.evict.list)); in xe_vm_validate_rebind()
432 drm_exec_for_each_locked_object(exec, index, obj) { in xe_vm_validate_rebind()
433 ret = dma_resv_reserve_fences(obj->resv, num_fences); in xe_vm_validate_rebind()
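
A hedged sketch of calling the helper above from inside a drm_exec transaction, assuming its third parameter is the number of fence slots to reserve on every locked object (as the dma_resv_reserve_fences() call suggests):

	drm_exec_until_all_locked(&exec) {
		err = drm_gpuvm_prepare_objects(&vm->gpuvm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;

		err = xe_vm_validate_rebind(vm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
	}
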
441 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, in xe_preempt_work_begin() argument
446 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
451 vm->preempt.rebind_deactivated = true; in xe_preempt_work_begin()
461 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
475 return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues); in xe_preempt_work_begin()
481 struct drm_exec exec; in preempt_rebind_work_func() local
489 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in preempt_rebind_work_func()
492 down_write(&vm->lock); in preempt_rebind_work_func()
495 up_write(&vm->lock); in preempt_rebind_work_func()
507 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); in preempt_rebind_work_func()
509 drm_exec_until_all_locked(&exec) { in preempt_rebind_work_func()
512 err = xe_preempt_work_begin(&exec, vm, &done); in preempt_rebind_work_func()
513 drm_exec_retry_on_contention(&exec); in preempt_rebind_work_func()
515 drm_exec_fini(&exec); in preempt_rebind_work_func()
516 if (err && xe_vm_validate_should_retry(&exec, err, &end)) in preempt_rebind_work_func()
517 err = -EAGAIN; in preempt_rebind_work_func()
536 err = -ETIME; in preempt_rebind_work_func()
545 down_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
547 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
548 err = -EAGAIN; in preempt_rebind_work_func()
554 spin_lock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
555 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in preempt_rebind_work_func()
556 spin_unlock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
560 resume_and_reinstall_preempt_fences(vm, &exec); in preempt_rebind_work_func()
561 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
564 drm_exec_fini(&exec); in preempt_rebind_work_func()
566 if (err == -EAGAIN) { in preempt_rebind_work_func()
572 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err); in preempt_rebind_work_func()
575 up_write(&vm->lock); in preempt_rebind_work_func()
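
The worker above follows the canonical drm_exec shape that recurs throughout this file. Stripped to its skeleton, with lock_and_prepare() standing in for the driver-specific step:

	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		err = lock_and_prepare(&exec);	/* may drop and retake locks */
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	/* ... on success, work with the locked objects ... */
	drm_exec_fini(&exec);
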
584 struct xe_userptr *userptr = &uvma->userptr; in __vma_userptr_invalidate()
585 struct xe_vma *vma = &uvma->vma; in __vma_userptr_invalidate()
591 * Tell exec and rebind worker they need to repin and rebind this in __vma_userptr_invalidate()
595 !(vma->gpuva.flags & XE_VMA_DESTROYED)) { in __vma_userptr_invalidate()
596 spin_lock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
597 list_move_tail(&userptr->invalidate_link, in __vma_userptr_invalidate()
598 &vm->userptr.invalidated); in __vma_userptr_invalidate()
599 spin_unlock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
619 if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { in __vma_userptr_invalidate()
632 struct xe_vma *vma = &uvma->vma; in vma_userptr_invalidate()
635 xe_assert(vm->xe, xe_vma_is_userptr(vma)); in vma_userptr_invalidate()
641 vm_dbg(&xe_vma_vm(vma)->xe->drm, in vma_userptr_invalidate()
645 down_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
649 up_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
661 * xe_vma_userptr_force_invalidate() - force invalidate a userptr
668 struct xe_vm *vm = xe_vma_vm(&uvma->vma); in xe_vma_userptr_force_invalidate()
670 /* Protect against concurrent userptr pinning */ in xe_vma_userptr_force_invalidate()
671 lockdep_assert_held(&vm->lock); in xe_vma_userptr_force_invalidate()
672 /* Protect against concurrent notifiers */ in xe_vma_userptr_force_invalidate()
673 lockdep_assert_held(&vm->userptr.notifier_lock); in xe_vma_userptr_force_invalidate()
675 * Protect against concurrent instances of this function and in xe_vma_userptr_force_invalidate()
676 * the critical exec sections in xe_vma_userptr_force_invalidate()
680 if (!mmu_interval_read_retry(&uvma->userptr.notifier, in xe_vma_userptr_force_invalidate()
681 uvma->userptr.notifier_seq)) in xe_vma_userptr_force_invalidate()
682 uvma->userptr.notifier_seq -= 2; in xe_vma_userptr_force_invalidate()
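
The notifier_seq arithmetic above piggybacks on the generic mmu_interval_notifier sequence protocol. For reference, a minimal sketch of that protocol (not driver code; the lock and notifier names are placeholders):

	unsigned long seq = mmu_interval_read_begin(&notifier);

	/* ... pin the pages outside the notifier lock ... */

	down_read(&notifier_lock);
	if (mmu_interval_read_retry(&notifier, seq)) {
		up_read(&notifier_lock);
		/* an invalidation raced with the pin: unpin and retry */
	}
	/* the pinned pages stay valid while the lock is held */
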
693 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm)); in xe_vm_userptr_pin()
694 lockdep_assert_held_write(&vm->lock); in xe_vm_userptr_pin()
697 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
698 xe_assert(vm->xe, list_empty(&vm->userptr.repin_list)); in xe_vm_userptr_pin()
699 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, in xe_vm_userptr_pin()
701 list_del_init(&uvma->userptr.invalidate_link); in xe_vm_userptr_pin()
702 list_add_tail(&uvma->userptr.repin_link, in xe_vm_userptr_pin()
703 &vm->userptr.repin_list); in xe_vm_userptr_pin()
705 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
708 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
711 if (err == -EFAULT) { in xe_vm_userptr_pin()
712 list_del_init(&uvma->userptr.repin_link); in xe_vm_userptr_pin()
715 * then had to retry before the re-bind happened, due in xe_vm_userptr_pin()
723 if (!list_empty(&uvma->vma.combined_links.rebind)) in xe_vm_userptr_pin()
724 list_del_init(&uvma->vma.combined_links.rebind); in xe_vm_userptr_pin()
732 err = xe_vm_invalidate_vma(&uvma->vma); in xe_vm_userptr_pin()
740 list_del_init(&uvma->userptr.repin_link); in xe_vm_userptr_pin()
741 list_move_tail(&uvma->vma.combined_links.rebind, in xe_vm_userptr_pin()
742 &vm->rebind_list); in xe_vm_userptr_pin()
747 down_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
748 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
749 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
751 list_del_init(&uvma->userptr.repin_link); in xe_vm_userptr_pin()
752 list_move_tail(&uvma->userptr.invalidate_link, in xe_vm_userptr_pin()
753 &vm->userptr.invalidated); in xe_vm_userptr_pin()
755 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
756 up_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
762 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
770 * -EAGAIN if there are.
774 return (list_empty_careful(&vm->userptr.repin_list) && in xe_vm_userptr_check_repin()
775 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in xe_vm_userptr_check_repin()
783 if (!vops->pt_update_ops[i].num_ops) in xe_vma_ops_alloc()
786 vops->pt_update_ops[i].ops = in xe_vma_ops_alloc()
787 kmalloc_array(vops->pt_update_ops[i].num_ops, in xe_vma_ops_alloc()
788 sizeof(*vops->pt_update_ops[i].ops), in xe_vma_ops_alloc()
790 if (!vops->pt_update_ops[i].ops) in xe_vma_ops_alloc()
791 return array_of_binds ? -ENOBUFS : -ENOMEM; in xe_vma_ops_alloc()
803 kfree(vops->pt_update_ops[i].ops); in xe_vma_ops_fini()
812 ++vops->pt_update_ops[i].num_ops; in xe_vma_ops_incr_pt_update_ops()
818 INIT_LIST_HEAD(&op->link); in xe_vm_populate_rebind()
819 op->tile_mask = tile_mask; in xe_vm_populate_rebind()
820 op->base.op = DRM_GPUVA_OP_MAP; in xe_vm_populate_rebind()
821 op->base.map.va.addr = vma->gpuva.va.addr; in xe_vm_populate_rebind()
822 op->base.map.va.range = vma->gpuva.va.range; in xe_vm_populate_rebind()
823 op->base.map.gem.obj = vma->gpuva.gem.obj; in xe_vm_populate_rebind()
824 op->base.map.gem.offset = vma->gpuva.gem.offset; in xe_vm_populate_rebind()
825 op->map.vma = vma; in xe_vm_populate_rebind()
826 op->map.immediate = true; in xe_vm_populate_rebind()
827 op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE; in xe_vm_populate_rebind()
828 op->map.is_null = xe_vma_is_null(vma); in xe_vm_populate_rebind()
838 return -ENOMEM; in xe_vm_ops_add_rebind()
841 list_add_tail(&op->link, &vops->list); in xe_vm_ops_add_rebind()
861 lockdep_assert_held(&vm->lock); in xe_vm_rebind()
863 list_empty(&vm->rebind_list)) in xe_vm_rebind()
871 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { in xe_vm_rebind()
872 xe_assert(vm->xe, vma->tile_present); in xe_vm_rebind()
880 vma->tile_present); in xe_vm_rebind()
894 list_for_each_entry_safe(vma, next, &vm->rebind_list, in xe_vm_rebind()
896 list_del_init(&vma->combined_links.rebind); in xe_vm_rebind()
900 list_del(&op->link); in xe_vm_rebind()
917 lockdep_assert_held(&vm->lock); in xe_vma_rebind()
919 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vma_rebind()
922 for_each_tile(tile, vm->xe, id) { in xe_vma_rebind()
924 vops.pt_update_ops[tile->id].q = in xe_vma_rebind()
942 list_del(&op->link); in xe_vma_rebind()
975 xe_assert(vm->xe, start < end); in xe_vma_create()
976 xe_assert(vm->xe, end < vm->size); in xe_vma_create()
986 return ERR_PTR(-ENOMEM); in xe_vma_create()
988 vma = &uvma->vma; in xe_vma_create()
992 return ERR_PTR(-ENOMEM); in xe_vma_create()
995 vma->gpuva.flags |= DRM_GPUVA_SPARSE; in xe_vma_create()
997 vma->gpuva.gem.obj = &bo->ttm.base; in xe_vma_create()
1000 INIT_LIST_HEAD(&vma->combined_links.rebind); in xe_vma_create()
1002 INIT_LIST_HEAD(&vma->gpuva.gem.entry); in xe_vma_create()
1003 vma->gpuva.vm = &vm->gpuvm; in xe_vma_create()
1004 vma->gpuva.va.addr = start; in xe_vma_create()
1005 vma->gpuva.va.range = end - start + 1; in xe_vma_create()
1007 vma->gpuva.flags |= XE_VMA_READ_ONLY; in xe_vma_create()
1009 vma->gpuva.flags |= XE_VMA_DUMPABLE; in xe_vma_create()
1011 for_each_tile(tile, vm->xe, id) in xe_vma_create()
1012 vma->tile_mask |= 0x1 << id; in xe_vma_create()
1014 if (vm->xe->info.has_atomic_enable_pte_bit) in xe_vma_create()
1015 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT; in xe_vma_create()
1017 vma->pat_index = pat_index; in xe_vma_create()
1024 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); in xe_vma_create()
1031 drm_gem_object_get(&bo->ttm.base); in xe_vma_create()
1032 vma->gpuva.gem.offset = bo_offset_or_userptr; in xe_vma_create()
1033 drm_gpuva_link(&vma->gpuva, vm_bo); in xe_vma_create()
1037 struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr; in xe_vma_create()
1038 u64 size = end - start + 1; in xe_vma_create()
1041 INIT_LIST_HEAD(&userptr->invalidate_link); in xe_vma_create()
1042 INIT_LIST_HEAD(&userptr->repin_link); in xe_vma_create()
1043 vma->gpuva.gem.offset = bo_offset_or_userptr; in xe_vma_create()
1044 mutex_init(&userptr->unmap_mutex); in xe_vma_create()
1046 err = mmu_interval_notifier_insert(&userptr->notifier, in xe_vma_create()
1047 current->mm, in xe_vma_create()
1055 userptr->notifier_seq = LONG_MAX; in xe_vma_create()
1068 if (vma->ufence) { in xe_vma_destroy_late()
1069 xe_sync_ufence_put(vma->ufence); in xe_vma_destroy_late()
1070 vma->ufence = NULL; in xe_vma_destroy_late()
1075 struct xe_userptr *userptr = &uvma->userptr; in xe_vma_destroy_late()
1077 if (userptr->sg) in xe_vma_destroy_late()
1085 mmu_interval_notifier_remove(&userptr->notifier); in xe_vma_destroy_late()
1086 mutex_destroy(&userptr->unmap_mutex); in xe_vma_destroy_late()
1110 INIT_WORK(&vma->destroy_work, vma_destroy_work_func); in vma_destroy_cb()
1111 queue_work(system_unbound_wq, &vma->destroy_work); in vma_destroy_cb()
1118 lockdep_assert_held_write(&vm->lock); in xe_vma_destroy()
1119 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy)); in xe_vma_destroy()
1122 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); in xe_vma_destroy()
1124 spin_lock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1125 xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link)); in xe_vma_destroy()
1126 list_del(&to_userptr_vma(vma)->userptr.invalidate_link); in xe_vma_destroy()
1127 spin_unlock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1131 drm_gpuva_unlink(&vma->gpuva); in xe_vma_destroy()
1136 int ret = dma_fence_add_callback(fence, &vma->destroy_cb, in xe_vma_destroy()
1140 XE_WARN_ON(ret != -ENOENT); in xe_vma_destroy()
1149 * xe_vm_lock_vma() - drm_exec utility to lock a vma
1150 * @exec: The drm_exec object we're currently locking for.
1155 * may return -EDEADLK on WW transaction contention and -EINTR if
1158 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma) in xe_vm_lock_vma() argument
1166 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in xe_vm_lock_vma()
1167 if (!err && bo && !bo->vm) in xe_vm_lock_vma()
1168 err = drm_exec_lock_obj(exec, &bo->ttm.base); in xe_vm_lock_vma()
1175 struct drm_exec exec; in xe_vma_destroy_unlocked() local
1178 drm_exec_init(&exec, 0, 0); in xe_vma_destroy_unlocked()
1179 drm_exec_until_all_locked(&exec) { in xe_vma_destroy_unlocked()
1180 err = xe_vm_lock_vma(&exec, vma); in xe_vma_destroy_unlocked()
1181 drm_exec_retry_on_contention(&exec); in xe_vma_destroy_unlocked()
1188 drm_exec_fini(&exec); in xe_vma_destroy_unlocked()
1196 lockdep_assert_held(&vm->lock); in xe_vm_find_overlapping_vma()
1201 xe_assert(vm->xe, start + range <= vm->size); in xe_vm_find_overlapping_vma()
1203 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); in xe_vm_find_overlapping_vma()
1212 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_insert_vma()
1213 lockdep_assert_held(&vm->lock); in xe_vm_insert_vma()
1215 mutex_lock(&vm->snap_mutex); in xe_vm_insert_vma()
1216 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); in xe_vm_insert_vma()
1217 mutex_unlock(&vm->snap_mutex); in xe_vm_insert_vma()
1225 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_remove_vma()
1226 lockdep_assert_held(&vm->lock); in xe_vm_remove_vma()
1228 mutex_lock(&vm->snap_mutex); in xe_vm_remove_vma()
1229 drm_gpuva_remove(&vma->gpuva); in xe_vm_remove_vma()
1230 mutex_unlock(&vm->snap_mutex); in xe_vm_remove_vma()
1231 if (vm->usm.last_fault_vma == vma) in xe_vm_remove_vma()
1232 vm->usm.last_fault_vma = NULL; in xe_vm_remove_vma()
1244 return &op->base; in xe_vm_op_alloc()
1383 * xe_vm_create_scratch() - Set up a scratch memory pagetable tree for the
1389 * Sets up a pagetable tree with one page-table per level and a single
1390 * leaf PTE. All pagetable entries point to the single page-table or,
1399 u8 id = tile->id; in xe_vm_create_scratch()
1402 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { in xe_vm_create_scratch()
1403 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); in xe_vm_create_scratch()
1404 if (IS_ERR(vm->scratch_pt[id][i])) in xe_vm_create_scratch()
1405 return PTR_ERR(vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1407 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1422 for_each_tile(tile, vm->xe, id) { in xe_vm_free_scratch()
1425 if (!vm->pt_root[id]) in xe_vm_free_scratch()
1428 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i) in xe_vm_free_scratch()
1429 if (vm->scratch_pt[id][i]) in xe_vm_free_scratch()
1430 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL); in xe_vm_free_scratch()
1444 return ERR_PTR(-ENOMEM); in xe_vm_create()
1446 vm->xe = xe; in xe_vm_create()
1448 vm->size = 1ull << xe->info.va_bits; in xe_vm_create()
1450 vm->flags = flags; in xe_vm_create()
1452 init_rwsem(&vm->lock); in xe_vm_create()
1453 mutex_init(&vm->snap_mutex); in xe_vm_create()
1455 INIT_LIST_HEAD(&vm->rebind_list); in xe_vm_create()
1457 INIT_LIST_HEAD(&vm->userptr.repin_list); in xe_vm_create()
1458 INIT_LIST_HEAD(&vm->userptr.invalidated); in xe_vm_create()
1459 init_rwsem(&vm->userptr.notifier_lock); in xe_vm_create()
1460 spin_lock_init(&vm->userptr.invalidated_lock); in xe_vm_create()
1462 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in xe_vm_create()
1464 INIT_WORK(&vm->destroy_work, vm_destroy_work_func); in xe_vm_create()
1466 INIT_LIST_HEAD(&vm->preempt.exec_queues); in xe_vm_create()
1467 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */ in xe_vm_create()
1470 xe_range_fence_tree_init(&vm->rftree[id]); in xe_vm_create()
1472 vm->pt_ops = &xelp_pt_ops; in xe_vm_create()
1475 * Long-running workloads are not protected by the scheduler references. in xe_vm_create()
1476 * By design, run_job for long-running workloads returns NULL and the in xe_vm_create()
1483 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm); in xe_vm_create()
1485 err = -ENOMEM; in xe_vm_create()
1489 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm, in xe_vm_create()
1490 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops); in xe_vm_create()
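
Reading the drm_gpuvm_init() call just above against the documented parameter order (gpuvm, name, flags, drm device, reservation GEM object, VA start, VA range, reserved offset, reserved range, ops table), an annotated restatement; the annotation is mine, not part of the source:

	drm_gpuvm_init(&vm->gpuvm, "Xe VM",		/* gpuvm, name */
		       DRM_GPUVM_RESV_PROTECTED,	/* flags */
		       &xe->drm, vm_resv_obj,		/* device, shared resv object */
		       0, vm->size,			/* VA start, VA range */
		       0, 0,				/* no kernel-reserved region */
		       &gpuvm_ops);			/* driver ops table */
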
1498 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) in xe_vm_create()
1499 vm->flags |= XE_VM_FLAG_64K; in xe_vm_create()
1503 tile->id != XE_VM_FLAG_TILE_ID(flags)) in xe_vm_create()
1506 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level); in xe_vm_create()
1507 if (IS_ERR(vm->pt_root[id])) { in xe_vm_create()
1508 err = PTR_ERR(vm->pt_root[id]); in xe_vm_create()
1509 vm->pt_root[id] = NULL; in xe_vm_create()
1516 if (!vm->pt_root[id]) in xe_vm_create()
1523 vm->batch_invalidate_tlb = true; in xe_vm_create()
1526 if (vm->flags & XE_VM_FLAG_LR_MODE) { in xe_vm_create()
1527 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); in xe_vm_create()
1528 vm->batch_invalidate_tlb = false; in xe_vm_create()
1533 if (!vm->pt_root[id]) in xe_vm_create()
1536 xe_pt_populate_empty(tile, vm, vm->pt_root[id]); in xe_vm_create()
1546 if (!vm->pt_root[id]) in xe_vm_create()
1554 vm->q[id] = q; in xe_vm_create()
1560 vm->composite_fence_ctx = dma_fence_context_alloc(1); in xe_vm_create()
1573 mutex_destroy(&vm->snap_mutex); in xe_vm_create()
1575 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_create()
1576 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in xe_vm_create()
1585 down_write(&vm->lock); in xe_vm_close()
1586 vm->size = 0; in xe_vm_close()
1587 up_write(&vm->lock); in xe_vm_close()
1593 struct xe_device *xe = vm->xe; in xe_vm_close_and_put()
1599 xe_assert(xe, !vm->preempt.num_exec_queues); in xe_vm_close_and_put()
1603 flush_work(&vm->preempt.rebind_work); in xe_vm_close_and_put()
1605 down_write(&vm->lock); in xe_vm_close_and_put()
1607 if (vm->q[id]) in xe_vm_close_and_put()
1608 xe_exec_queue_last_fence_put(vm->q[id], vm); in xe_vm_close_and_put()
1610 up_write(&vm->lock); in xe_vm_close_and_put()
1613 if (vm->q[id]) { in xe_vm_close_and_put()
1614 xe_exec_queue_kill(vm->q[id]); in xe_vm_close_and_put()
1615 xe_exec_queue_put(vm->q[id]); in xe_vm_close_and_put()
1616 vm->q[id] = NULL; in xe_vm_close_and_put()
1620 down_write(&vm->lock); in xe_vm_close_and_put()
1622 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { in xe_vm_close_and_put()
1626 down_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1627 vma->gpuva.flags |= XE_VMA_DESTROYED; in xe_vm_close_and_put()
1628 up_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1634 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) { in xe_vm_close_and_put()
1635 list_del_init(&vma->combined_links.rebind); in xe_vm_close_and_put()
1640 list_move_tail(&vma->combined_links.destroy, &contested); in xe_vm_close_and_put()
1641 vma->gpuva.flags |= XE_VMA_DESTROYED; in xe_vm_close_and_put()
1654 if (vm->pt_root[id]) { in xe_vm_close_and_put()
1655 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); in xe_vm_close_and_put()
1656 vm->pt_root[id] = NULL; in xe_vm_close_and_put()
1662 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL in xe_vm_close_and_put()
1668 list_del_init(&vma->combined_links.destroy); in xe_vm_close_and_put()
1672 up_write(&vm->lock); in xe_vm_close_and_put()
1674 down_write(&xe->usm.lock); in xe_vm_close_and_put()
1675 if (vm->usm.asid) { in xe_vm_close_and_put()
1678 xe_assert(xe, xe->info.has_asid); in xe_vm_close_and_put()
1679 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION)); in xe_vm_close_and_put()
1681 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); in xe_vm_close_and_put()
1684 up_write(&xe->usm.lock); in xe_vm_close_and_put()
1687 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_close_and_put()
1696 struct xe_device *xe = vm->xe; in vm_destroy_work_func()
1701 xe_assert(xe, !vm->size); in vm_destroy_work_func()
1704 flush_work(&vm->preempt.rebind_work); in vm_destroy_work_func()
1706 mutex_destroy(&vm->snap_mutex); in vm_destroy_work_func()
1708 if (vm->flags & XE_VM_FLAG_LR_MODE) in vm_destroy_work_func()
1712 XE_WARN_ON(vm->pt_root[id]); in vm_destroy_work_func()
1716 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in vm_destroy_work_func()
1718 if (vm->xef) in vm_destroy_work_func()
1719 xe_file_put(vm->xef); in vm_destroy_work_func()
1729 queue_work(system_unbound_wq, &vm->destroy_work); in xe_vm_free()
1736 mutex_lock(&xef->vm.lock); in xe_vm_lookup()
1737 vm = xa_load(&xef->vm.xa, id); in xe_vm_lookup()
1740 mutex_unlock(&xef->vm.lock); in xe_vm_lookup()
1747 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0, in xe_vm_pdp4_descriptor()
1748 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]); in xe_vm_pdp4_descriptor()
1754 return q ? q : vm->q[0]; in to_wait_exec_queue()
1788 if (XE_IOCTL_DBG(xe, args->extensions)) in xe_vm_create_ioctl()
1789 return -EINVAL; in xe_vm_create_ioctl()
1792 args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE; in xe_vm_create_ioctl()
1794 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && in xe_vm_create_ioctl()
1795 !xe->info.has_usm)) in xe_vm_create_ioctl()
1796 return -EINVAL; in xe_vm_create_ioctl()
1798 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_vm_create_ioctl()
1799 return -EINVAL; in xe_vm_create_ioctl()
1801 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS)) in xe_vm_create_ioctl()
1802 return -EINVAL; in xe_vm_create_ioctl()
1804 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE && in xe_vm_create_ioctl()
1805 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) in xe_vm_create_ioctl()
1806 return -EINVAL; in xe_vm_create_ioctl()
1808 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) && in xe_vm_create_ioctl()
1809 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) in xe_vm_create_ioctl()
1810 return -EINVAL; in xe_vm_create_ioctl()
1812 if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE) in xe_vm_create_ioctl()
1814 if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) in xe_vm_create_ioctl()
1816 if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) in xe_vm_create_ioctl()
1823 if (xe->info.has_asid) { in xe_vm_create_ioctl()
1824 down_write(&xe->usm.lock); in xe_vm_create_ioctl()
1825 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, in xe_vm_create_ioctl()
1826 XA_LIMIT(1, XE_MAX_ASID - 1), in xe_vm_create_ioctl()
1827 &xe->usm.next_asid, GFP_KERNEL); in xe_vm_create_ioctl()
1828 up_write(&xe->usm.lock); in xe_vm_create_ioctl()
1832 vm->usm.asid = asid; in xe_vm_create_ioctl()
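
xa_alloc_cyclic(), used above for ASID allocation, returns 0 on success, 1 if the allocation succeeded only after the cyclic counter wrapped, and a negative errno (-ENOMEM or -EBUSY) on failure, so callers treat only negative values as errors. A generic sketch with a placeholder xarray and entry:

	u32 id, next = 0;	/* 'next' normally persists between allocations */
	int ret = xa_alloc_cyclic(&xa, &id, entry,
				  XA_LIMIT(1, XE_MAX_ASID - 1),
				  &next, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* ret == 1 only means the counter wrapped */
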
1835 vm->xef = xe_file_get(xef); in xe_vm_create_ioctl()
1839 if (vm->pt_root[id]) in xe_vm_create_ioctl()
1840 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); in xe_vm_create_ioctl()
1843 /* Warning: Security issue - never enable by default */ in xe_vm_create_ioctl()
1844 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); in xe_vm_create_ioctl()
1848 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); in xe_vm_create_ioctl()
1852 args->vm_id = id; in xe_vm_create_ioctl()
1871 if (XE_IOCTL_DBG(xe, args->pad) || in xe_vm_destroy_ioctl()
1872 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_vm_destroy_ioctl()
1873 return -EINVAL; in xe_vm_destroy_ioctl()
1875 mutex_lock(&xef->vm.lock); in xe_vm_destroy_ioctl()
1876 vm = xa_load(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
1878 err = -ENOENT; in xe_vm_destroy_ioctl()
1879 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) in xe_vm_destroy_ioctl()
1880 err = -EBUSY; in xe_vm_destroy_ioctl()
1882 xa_erase(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
1883 mutex_unlock(&xef->vm.lock); in xe_vm_destroy_ioctl()
1900 down_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
1901 vma->gpuva.flags |= XE_VMA_DESTROYED; in prep_vma_destroy()
1902 up_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
1915 switch (op->op) { in print_op()
1917 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx", in print_op()
1918 (ULL)op->map.va.addr, (ULL)op->map.va.range); in print_op()
1921 vma = gpuva_to_vma(op->remap.unmap->va); in print_op()
1922 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d", in print_op()
1924 op->remap.unmap->keep ? 1 : 0); in print_op()
1925 if (op->remap.prev) in print_op()
1926 vm_dbg(&xe->drm, in print_op()
1928 (ULL)op->remap.prev->va.addr, in print_op()
1929 (ULL)op->remap.prev->va.range); in print_op()
1930 if (op->remap.next) in print_op()
1931 vm_dbg(&xe->drm, in print_op()
1933 (ULL)op->remap.next->va.addr, in print_op()
1934 (ULL)op->remap.next->va.range); in print_op()
1937 vma = gpuva_to_vma(op->unmap.va); in print_op()
1938 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d", in print_op()
1940 op->unmap.keep ? 1 : 0); in print_op()
1943 vma = gpuva_to_vma(op->prefetch.va); in print_op()
1944 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx", in print_op()
1948 drm_warn(&xe->drm, "NOT POSSIBLE"); in print_op()
1967 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL; in vm_bind_ioctl_ops_create()
1973 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_create()
1975 vm_dbg(&vm->xe->drm, in vm_bind_ioctl_ops_create()
1983 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, in vm_bind_ioctl_ops_create()
1987 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
1990 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
1993 xe_assert(vm->xe, bo); in vm_bind_ioctl_ops_create()
1999 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); in vm_bind_ioctl_ops_create()
2010 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_create()
2011 ops = ERR_PTR(-EINVAL); in vm_bind_ioctl_ops_create()
2019 if (__op->op == DRM_GPUVA_OP_MAP) { in vm_bind_ioctl_ops_create()
2020 op->map.immediate = in vm_bind_ioctl_ops_create()
2022 op->map.read_only = in vm_bind_ioctl_ops_create()
2024 op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; in vm_bind_ioctl_ops_create()
2025 op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE; in vm_bind_ioctl_ops_create()
2026 op->map.pat_index = pat_index; in vm_bind_ioctl_ops_create()
2027 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) { in vm_bind_ioctl_ops_create()
2028 op->prefetch.region = prefetch_region; in vm_bind_ioctl_ops_create()
2031 print_op(vm->xe, __op); in vm_bind_ioctl_ops_create()
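
The *_ops_create() helpers above hand back a struct drm_gpuva_ops list that the driver then walks (the drm_gpuva_for_each_op() loop itself is elided by this listing, but __op above is its iterator). A minimal consumption sketch, with handle_op() as a hypothetical per-op handler:

	struct drm_gpuva_op *__op;
	int err = 0;

	drm_gpuva_for_each_op(__op, ops) {
		err = handle_op(vm, __op);
		if (err)
			break;
	}
	if (err)
		drm_gpuva_ops_free(&vm->gpuvm, ops);
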
2041 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL; in new_vma()
2042 struct drm_exec exec; in new_vma() local
2046 lockdep_assert_held_write(&vm->lock); in new_vma()
2049 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); in new_vma()
2050 drm_exec_until_all_locked(&exec) { in new_vma()
2052 if (!bo->vm) { in new_vma()
2053 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); in new_vma()
2054 drm_exec_retry_on_contention(&exec); in new_vma()
2057 err = drm_exec_lock_obj(&exec, &bo->ttm.base); in new_vma()
2058 drm_exec_retry_on_contention(&exec); in new_vma()
2061 drm_exec_fini(&exec); in new_vma()
2066 vma = xe_vma_create(vm, bo, op->gem.offset, in new_vma()
2067 op->va.addr, op->va.addr + in new_vma()
2068 op->va.range - 1, pat_index, flags); in new_vma()
2074 else if (!xe_vma_has_no_bo(vma) && !bo->vm) in new_vma()
2079 drm_exec_fini(&exec); in new_vma()
2092 if (vma->gpuva.flags & XE_VMA_PTE_1G) in xe_vma_max_pte_size()
2094 else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT)) in xe_vma_max_pte_size()
2096 else if (vma->gpuva.flags & XE_VMA_PTE_64K) in xe_vma_max_pte_size()
2098 else if (vma->gpuva.flags & XE_VMA_PTE_4K) in xe_vma_max_pte_size()
2108 vma->gpuva.flags |= XE_VMA_PTE_1G; in xe_vma_set_pte_size()
2111 vma->gpuva.flags |= XE_VMA_PTE_2M; in xe_vma_set_pte_size()
2114 vma->gpuva.flags |= XE_VMA_PTE_64K; in xe_vma_set_pte_size()
2117 vma->gpuva.flags |= XE_VMA_PTE_4K; in xe_vma_set_pte_size()
2126 lockdep_assert_held_write(&vm->lock); in xe_vma_op_commit()
2128 switch (op->base.op) { in xe_vma_op_commit()
2130 err |= xe_vm_insert_vma(vm, op->map.vma); in xe_vma_op_commit()
2132 op->flags |= XE_VMA_OP_COMMITTED; in xe_vma_op_commit()
2137 gpuva_to_vma(op->base.remap.unmap->va)->tile_present; in xe_vma_op_commit()
2139 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va), in xe_vma_op_commit()
2141 op->flags |= XE_VMA_OP_COMMITTED; in xe_vma_op_commit()
2143 if (op->remap.prev) { in xe_vma_op_commit()
2144 err |= xe_vm_insert_vma(vm, op->remap.prev); in xe_vma_op_commit()
2146 op->flags |= XE_VMA_OP_PREV_COMMITTED; in xe_vma_op_commit()
2147 if (!err && op->remap.skip_prev) { in xe_vma_op_commit()
2148 op->remap.prev->tile_present = in xe_vma_op_commit()
2150 op->remap.prev = NULL; in xe_vma_op_commit()
2153 if (op->remap.next) { in xe_vma_op_commit()
2154 err |= xe_vm_insert_vma(vm, op->remap.next); in xe_vma_op_commit()
2156 op->flags |= XE_VMA_OP_NEXT_COMMITTED; in xe_vma_op_commit()
2157 if (!err && op->remap.skip_next) { in xe_vma_op_commit()
2158 op->remap.next->tile_present = in xe_vma_op_commit()
2160 op->remap.next = NULL; in xe_vma_op_commit()
2166 op->base.remap.unmap->va->va.addr = op->remap.start; in xe_vma_op_commit()
2167 op->base.remap.unmap->va->va.range = op->remap.range; in xe_vma_op_commit()
2172 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true); in xe_vma_op_commit()
2173 op->flags |= XE_VMA_OP_COMMITTED; in xe_vma_op_commit()
2176 op->flags |= XE_VMA_OP_COMMITTED; in xe_vma_op_commit()
2179 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_commit()
2188 struct xe_device *xe = vm->xe; in vm_bind_ioctl_ops_parse()
2194 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_parse()
2196 for_each_tile(tile, vm->xe, id) in vm_bind_ioctl_ops_parse()
2204 INIT_LIST_HEAD(&op->link); in vm_bind_ioctl_ops_parse()
2205 list_add_tail(&op->link, &vops->list); in vm_bind_ioctl_ops_parse()
2206 op->tile_mask = tile_mask; in vm_bind_ioctl_ops_parse()
2208 switch (op->base.op) { in vm_bind_ioctl_ops_parse()
2211 flags |= op->map.read_only ? in vm_bind_ioctl_ops_parse()
2213 flags |= op->map.is_null ? in vm_bind_ioctl_ops_parse()
2215 flags |= op->map.dumpable ? in vm_bind_ioctl_ops_parse()
2218 vma = new_vma(vm, &op->base.map, op->map.pat_index, in vm_bind_ioctl_ops_parse()
2223 op->map.vma = vma; in vm_bind_ioctl_ops_parse()
2224 if (op->map.immediate || !xe_vm_in_fault_mode(vm)) in vm_bind_ioctl_ops_parse()
2226 op->tile_mask); in vm_bind_ioctl_ops_parse()
2232 gpuva_to_vma(op->base.remap.unmap->va); in vm_bind_ioctl_ops_parse()
2234 op->remap.start = xe_vma_start(old); in vm_bind_ioctl_ops_parse()
2235 op->remap.range = xe_vma_size(old); in vm_bind_ioctl_ops_parse()
2237 if (op->base.remap.prev) { in vm_bind_ioctl_ops_parse()
2238 flags |= op->base.remap.unmap->va->flags & in vm_bind_ioctl_ops_parse()
2241 flags |= op->base.remap.unmap->va->flags & in vm_bind_ioctl_ops_parse()
2244 flags |= op->base.remap.unmap->va->flags & in vm_bind_ioctl_ops_parse()
2248 vma = new_vma(vm, op->base.remap.prev, in vm_bind_ioctl_ops_parse()
2249 old->pat_index, flags); in vm_bind_ioctl_ops_parse()
2253 op->remap.prev = vma; in vm_bind_ioctl_ops_parse()
2259 op->remap.skip_prev = !xe_vma_is_userptr(old) && in vm_bind_ioctl_ops_parse()
2262 if (op->remap.skip_prev) { in vm_bind_ioctl_ops_parse()
2264 op->remap.range -= in vm_bind_ioctl_ops_parse()
2265 xe_vma_end(vma) - in vm_bind_ioctl_ops_parse()
2267 op->remap.start = xe_vma_end(vma); in vm_bind_ioctl_ops_parse()
2268 vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx", in vm_bind_ioctl_ops_parse()
2269 (ULL)op->remap.start, in vm_bind_ioctl_ops_parse()
2270 (ULL)op->remap.range); in vm_bind_ioctl_ops_parse()
2272 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); in vm_bind_ioctl_ops_parse()
2276 if (op->base.remap.next) { in vm_bind_ioctl_ops_parse()
2277 flags |= op->base.remap.unmap->va->flags & in vm_bind_ioctl_ops_parse()
2280 flags |= op->base.remap.unmap->va->flags & in vm_bind_ioctl_ops_parse()
2283 flags |= op->base.remap.unmap->va->flags & in vm_bind_ioctl_ops_parse()
2287 vma = new_vma(vm, op->base.remap.next, in vm_bind_ioctl_ops_parse()
2288 old->pat_index, flags); in vm_bind_ioctl_ops_parse()
2292 op->remap.next = vma; in vm_bind_ioctl_ops_parse()
2298 op->remap.skip_next = !xe_vma_is_userptr(old) && in vm_bind_ioctl_ops_parse()
2301 if (op->remap.skip_next) { in vm_bind_ioctl_ops_parse()
2303 op->remap.range -= in vm_bind_ioctl_ops_parse()
2304 xe_vma_end(old) - in vm_bind_ioctl_ops_parse()
2306 vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx", in vm_bind_ioctl_ops_parse()
2307 (ULL)op->remap.start, in vm_bind_ioctl_ops_parse()
2308 (ULL)op->remap.range); in vm_bind_ioctl_ops_parse()
2310 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); in vm_bind_ioctl_ops_parse()
2313 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); in vm_bind_ioctl_ops_parse()
2317 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); in vm_bind_ioctl_ops_parse()
2320 vma = gpuva_to_vma(op->base.prefetch.va); in vm_bind_ioctl_ops_parse()
2328 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); in vm_bind_ioctl_ops_parse()
2331 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_parse()
2346 lockdep_assert_held_write(&vm->lock); in xe_vma_op_unwind()
2348 switch (op->base.op) { in xe_vma_op_unwind()
2350 if (op->map.vma) { in xe_vma_op_unwind()
2351 prep_vma_destroy(vm, op->map.vma, post_commit); in xe_vma_op_unwind()
2352 xe_vma_destroy_unlocked(op->map.vma); in xe_vma_op_unwind()
2357 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va); in xe_vma_op_unwind()
2360 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2361 vma->gpuva.flags &= ~XE_VMA_DESTROYED; in xe_vma_op_unwind()
2362 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2370 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va); in xe_vma_op_unwind()
2372 if (op->remap.prev) { in xe_vma_op_unwind()
2373 prep_vma_destroy(vm, op->remap.prev, prev_post_commit); in xe_vma_op_unwind()
2374 xe_vma_destroy_unlocked(op->remap.prev); in xe_vma_op_unwind()
2376 if (op->remap.next) { in xe_vma_op_unwind()
2377 prep_vma_destroy(vm, op->remap.next, next_post_commit); in xe_vma_op_unwind()
2378 xe_vma_destroy_unlocked(op->remap.next); in xe_vma_op_unwind()
2381 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2382 vma->gpuva.flags &= ~XE_VMA_DESTROYED; in xe_vma_op_unwind()
2383 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2393 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_unwind()
2403 for (i = num_ops_list - 1; i >= 0; --i) { in vm_bind_ioctl_ops_unwind()
2414 op->flags & XE_VMA_OP_COMMITTED, in vm_bind_ioctl_ops_unwind()
2415 op->flags & XE_VMA_OP_PREV_COMMITTED, in vm_bind_ioctl_ops_unwind()
2416 op->flags & XE_VMA_OP_NEXT_COMMITTED); in vm_bind_ioctl_ops_unwind()
2421 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma, in vma_lock_and_validate() argument
2429 if (!bo->vm) in vma_lock_and_validate()
2430 err = drm_exec_lock_obj(exec, &bo->ttm.base); in vma_lock_and_validate()
2441 if (vma->ufence) { in check_ufence()
2442 struct xe_user_fence * const f = vma->ufence; in check_ufence()
2445 return -EBUSY; in check_ufence()
2447 vma->ufence = NULL; in check_ufence()
2454 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, in op_lock_and_prep() argument
2459 switch (op->base.op) { in op_lock_and_prep()
2461 err = vma_lock_and_validate(exec, op->map.vma, in op_lock_and_prep()
2463 op->map.immediate); in op_lock_and_prep()
2466 err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va)); in op_lock_and_prep()
2470 err = vma_lock_and_validate(exec, in op_lock_and_prep()
2471 gpuva_to_vma(op->base.remap.unmap->va), in op_lock_and_prep()
2473 if (!err && op->remap.prev) in op_lock_and_prep()
2474 err = vma_lock_and_validate(exec, op->remap.prev, true); in op_lock_and_prep()
2475 if (!err && op->remap.next) in op_lock_and_prep()
2476 err = vma_lock_and_validate(exec, op->remap.next, true); in op_lock_and_prep()
2479 err = check_ufence(gpuva_to_vma(op->base.unmap.va)); in op_lock_and_prep()
2483 err = vma_lock_and_validate(exec, in op_lock_and_prep()
2484 gpuva_to_vma(op->base.unmap.va), in op_lock_and_prep()
2489 struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); in op_lock_and_prep()
2490 u32 region = op->prefetch.region; in op_lock_and_prep()
2492 xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type)); in op_lock_and_prep()
2494 err = vma_lock_and_validate(exec, in op_lock_and_prep()
2495 gpuva_to_vma(op->base.prefetch.va), in op_lock_and_prep()
2503 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_lock_and_prep()
2509 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, in vm_bind_ioctl_ops_lock_and_prep() argument
2516 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in vm_bind_ioctl_ops_lock_and_prep()
2520 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_lock_and_prep()
2521 err = op_lock_and_prep(exec, vm, op); in vm_bind_ioctl_ops_lock_and_prep()
2527 if (vops->inject_error && in vm_bind_ioctl_ops_lock_and_prep()
2528 vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) in vm_bind_ioctl_ops_lock_and_prep()
2529 return -ENOSPC; in vm_bind_ioctl_ops_lock_and_prep()
2537 switch (op->base.op) { in op_trace()
2539 trace_xe_vma_bind(op->map.vma); in op_trace()
2542 trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va)); in op_trace()
2543 if (op->remap.prev) in op_trace()
2544 trace_xe_vma_bind(op->remap.prev); in op_trace()
2545 if (op->remap.next) in op_trace()
2546 trace_xe_vma_bind(op->remap.next); in op_trace()
2549 trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va)); in op_trace()
2552 trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va)); in op_trace()
2563 list_for_each_entry(op, &vops->list, link) in trace_xe_vm_ops_execute()
2569 struct xe_exec_queue *q = vops->q; in vm_ops_setup_tile_args()
2574 for_each_tile(tile, vm->xe, id) { in vm_ops_setup_tile_args()
2575 if (vops->pt_update_ops[id].num_ops) in vm_ops_setup_tile_args()
2578 if (vops->pt_update_ops[id].q) in vm_ops_setup_tile_args()
2582 vops->pt_update_ops[id].q = q; in vm_ops_setup_tile_args()
2583 if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) in vm_ops_setup_tile_args()
2586 vops->pt_update_ops[id].q = vm->q[id]; in vm_ops_setup_tile_args()
2605 return ERR_PTR(-ENODATA); in ops_execute()
2611 fence = ERR_PTR(-ENOMEM); in ops_execute()
2616 for_each_tile(tile, vm->xe, id) { in ops_execute()
2617 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
2629 for_each_tile(tile, vm->xe, id) { in ops_execute()
2630 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
2643 vm->composite_fence_ctx, in ops_execute()
2644 vm->composite_fence_seqno++, in ops_execute()
2647 --vm->composite_fence_seqno; in ops_execute()
2648 fence = ERR_PTR(-ENOMEM); in ops_execute()
2651 fence = &cf->base; in ops_execute()
2654 for_each_tile(tile, vm->xe, id) { in ops_execute()
2655 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
2664 for_each_tile(tile, vm->xe, id) { in ops_execute()
2665 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
2671 dma_fence_put(fences[--current_fence]); in ops_execute()
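
The composite fence assembled above comes from dma_fence_array_create(), which takes ownership of the fence array and signals once all of its members have signalled (or any of them, if the last argument is true). A hedged sketch of the call shape, mirroring the seqno rollback visible in the listing:

	struct dma_fence_array *cf;

	cf = dma_fence_array_create(num_fences, fences,
				    vm->composite_fence_ctx,
				    vm->composite_fence_seqno++,
				    false);	/* signal only when all members signal */
	if (!cf) {
		--vm->composite_fence_seqno;	/* undo the seqno bump on failure */
		/* error handling elided */
	}
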
2682 if (vma->ufence) in vma_add_ufence()
2683 xe_sync_ufence_put(vma->ufence); in vma_add_ufence()
2684 vma->ufence = __xe_sync_ufence_get(ufence); in vma_add_ufence()
2690 switch (op->base.op) { in op_add_ufence()
2692 vma_add_ufence(op->map.vma, ufence); in op_add_ufence()
2695 if (op->remap.prev) in op_add_ufence()
2696 vma_add_ufence(op->remap.prev, ufence); in op_add_ufence()
2697 if (op->remap.next) in op_add_ufence()
2698 vma_add_ufence(op->remap.next, ufence); in op_add_ufence()
2703 vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence); in op_add_ufence()
2706 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_add_ufence()
2713 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q); in vm_bind_ioctl_ops_fini()
2718 ufence = find_ufence_get(vops->syncs, vops->num_syncs); in vm_bind_ioctl_ops_fini()
2719 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_fini()
2723 if (op->base.op == DRM_GPUVA_OP_UNMAP) in vm_bind_ioctl_ops_fini()
2724 xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence); in vm_bind_ioctl_ops_fini()
2725 else if (op->base.op == DRM_GPUVA_OP_REMAP) in vm_bind_ioctl_ops_fini()
2726 xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), in vm_bind_ioctl_ops_fini()
2731 for (i = 0; i < vops->num_syncs; i++) in vm_bind_ioctl_ops_fini()
2732 xe_sync_entry_signal(vops->syncs + i, fence); in vm_bind_ioctl_ops_fini()
2740 struct drm_exec exec; in vm_bind_ioctl_ops_execute() local
2744 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_execute()
2746 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | in vm_bind_ioctl_ops_execute()
2748 drm_exec_until_all_locked(&exec) { in vm_bind_ioctl_ops_execute()
2749 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); in vm_bind_ioctl_ops_execute()
2750 drm_exec_retry_on_contention(&exec); in vm_bind_ioctl_ops_execute()
2764 drm_exec_fini(&exec); in vm_bind_ioctl_ops_execute()
2791 if (XE_IOCTL_DBG(xe, args->pad || args->pad2) || in vm_bind_ioctl_check_args()
2792 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in vm_bind_ioctl_check_args()
2793 return -EINVAL; in vm_bind_ioctl_check_args()
2795 if (XE_IOCTL_DBG(xe, args->extensions)) in vm_bind_ioctl_check_args()
2796 return -EINVAL; in vm_bind_ioctl_check_args()
2798 if (args->num_binds > 1) { in vm_bind_ioctl_check_args()
2800 u64_to_user_ptr(args->vector_of_binds); in vm_bind_ioctl_check_args()
2802 *bind_ops = kvmalloc_array(args->num_binds, in vm_bind_ioctl_check_args()
2807 return args->num_binds > 1 ? -ENOBUFS : -ENOMEM; in vm_bind_ioctl_check_args()
2811 args->num_binds); in vm_bind_ioctl_check_args()
2813 err = -EFAULT; in vm_bind_ioctl_check_args()
2817 *bind_ops = &args->bind; in vm_bind_ioctl_check_args()
2820 for (i = 0; i < args->num_binds; ++i) { in vm_bind_ioctl_check_args()
2832 if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) { in vm_bind_ioctl_check_args()
2833 err = -EINVAL; in vm_bind_ioctl_check_args()
2837 pat_index = array_index_nospec(pat_index, xe->pat.n_entries); in vm_bind_ioctl_check_args()
2841 err = -EINVAL; in vm_bind_ioctl_check_args()
2846 err = -EINVAL; in vm_bind_ioctl_check_args()
2874 xe->info.mem_region_mask)) || in vm_bind_ioctl_check_args()
2877 err = -EINVAL; in vm_bind_ioctl_check_args()
2886 err = -EINVAL; in vm_bind_ioctl_check_args()
2894 if (args->num_binds > 1) in vm_bind_ioctl_check_args()
2927 INIT_LIST_HEAD(&vops->list); in xe_vma_ops_init()
2928 vops->vm = vm; in xe_vma_ops_init()
2929 vops->q = q; in xe_vma_ops_init()
2930 vops->syncs = syncs; in xe_vma_ops_init()
2931 vops->num_syncs = num_syncs; in xe_vma_ops_init()
2940 if (XE_IOCTL_DBG(xe, range > bo->size) || in xe_vm_bind_ioctl_validate_bo()
2942 bo->size - range)) { in xe_vm_bind_ioctl_validate_bo()
2943 return -EINVAL; in xe_vm_bind_ioctl_validate_bo()
2954 if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) && in xe_vm_bind_ioctl_validate_bo()
2955 (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) { in xe_vm_bind_ioctl_validate_bo()
2960 return -EINVAL; in xe_vm_bind_ioctl_validate_bo()
2965 if (bo->cpu_caching) { in xe_vm_bind_ioctl_validate_bo()
2967 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) { in xe_vm_bind_ioctl_validate_bo()
2968 return -EINVAL; in xe_vm_bind_ioctl_validate_bo()
2972 * Imported dma-buf from a different device should in xe_vm_bind_ioctl_validate_bo()
2977 return -EINVAL; in xe_vm_bind_ioctl_validate_bo()
3004 if (args->exec_queue_id) { in xe_vm_bind_ioctl()
3005 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_vm_bind_ioctl()
3007 err = -ENOENT; in xe_vm_bind_ioctl()
3011 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) { in xe_vm_bind_ioctl()
3012 err = -EINVAL; in xe_vm_bind_ioctl()
3017 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_bind_ioctl()
3019 err = -EINVAL; in xe_vm_bind_ioctl()
3023 err = down_write_killable(&vm->lock); in xe_vm_bind_ioctl()
3028 err = -ENOENT; in xe_vm_bind_ioctl()
3032 for (i = 0; i < args->num_binds; ++i) { in xe_vm_bind_ioctl()
3036 if (XE_IOCTL_DBG(xe, range > vm->size) || in xe_vm_bind_ioctl()
3037 XE_IOCTL_DBG(xe, addr > vm->size - range)) { in xe_vm_bind_ioctl()
3038 err = -EINVAL; in xe_vm_bind_ioctl()
3043 if (args->num_binds) { in xe_vm_bind_ioctl()
3044 bos = kvcalloc(args->num_binds, sizeof(*bos), in xe_vm_bind_ioctl()
3048 err = -ENOMEM; in xe_vm_bind_ioctl()
3052 ops = kvcalloc(args->num_binds, sizeof(*ops), in xe_vm_bind_ioctl()
3056 err = -ENOMEM; in xe_vm_bind_ioctl()
3061 for (i = 0; i < args->num_binds; ++i) { in xe_vm_bind_ioctl()
3074 err = -ENOENT; in xe_vm_bind_ioctl()
3085 if (args->num_syncs) { in xe_vm_bind_ioctl()
3086 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL); in xe_vm_bind_ioctl()
3088 err = -ENOMEM; in xe_vm_bind_ioctl()
3093 syncs_user = u64_to_user_ptr(args->syncs); in xe_vm_bind_ioctl()
3094 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { in xe_vm_bind_ioctl()
3099 (!args->num_binds ? in xe_vm_bind_ioctl()
3109 err = -EINVAL; in xe_vm_bind_ioctl()
3113 if (!args->num_binds) { in xe_vm_bind_ioctl()
3114 err = -ENODATA; in xe_vm_bind_ioctl()
3119 for (i = 0; i < args->num_binds; ++i) { in xe_vm_bind_ioctl()
3144 vm->xe->vm_inject_error_position = in xe_vm_bind_ioctl()
3145 (vm->xe->vm_inject_error_position + 1) % in xe_vm_bind_ioctl()
3153 err = -ENODATA; in xe_vm_bind_ioctl()
3157 err = xe_vma_ops_alloc(&vops, args->num_binds > 1); in xe_vm_bind_ioctl()
3164 if (err && err != -ENODATA) in xe_vm_bind_ioctl()
3165 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); in xe_vm_bind_ioctl()
3167 for (i = args->num_binds - 1; i >= 0; --i) in xe_vm_bind_ioctl()
3169 drm_gpuva_ops_free(&vm->gpuvm, ops[i]); in xe_vm_bind_ioctl()
3171 if (err == -ENODATA) in xe_vm_bind_ioctl()
3173 while (num_syncs--) in xe_vm_bind_ioctl()
3178 for (i = 0; i < args->num_binds; ++i) in xe_vm_bind_ioctl()
3181 up_write(&vm->lock); in xe_vm_bind_ioctl()
3190 if (args->num_binds > 1) in xe_vm_bind_ioctl()
3196 * xe_vm_lock() - Lock the vm's dma_resv object
3200 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3213 * xe_vm_unlock() - Unlock the vm's dma_resv object
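
A hypothetical usage sketch for the pair above, assuming xe_vm_lock(vm, intr) returns 0 or -EINTR and xe_vm_unlock(vm) drops the reservation again:

	int err = xe_vm_lock(vm, true);	/* interruptible wait for the resv */

	if (err)
		return err;		/* -EINTR: a signal arrived while waiting */
	/* ... touch state protected by the VM's dma_resv ... */
	xe_vm_unlock(vm);
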
3224 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3235 struct xe_device *xe = xe_vma_vm(vma)->xe; in xe_vm_invalidate_vma()
3246 vm_dbg(&xe_vma_vm(vma)->xe->drm, in xe_vm_invalidate_vma()
3250 /* Check that we don't race with page-table updates */ in xe_vm_invalidate_vma()
3254 (&to_userptr_vma(vma)->userptr.notifier, in xe_vm_invalidate_vma()
3255 to_userptr_vma(vma)->userptr.notifier_seq)); in xe_vm_invalidate_vma()
3267 xe_gt_tlb_invalidation_fence_init(tile->primary_gt, in xe_vm_invalidate_vma()
3271 ret = xe_gt_tlb_invalidation_vma(tile->primary_gt, in xe_vm_invalidate_vma()
3277 if (!tile->media_gt) in xe_vm_invalidate_vma()
3280 xe_gt_tlb_invalidation_fence_init(tile->media_gt, in xe_vm_invalidate_vma()
3284 ret = xe_gt_tlb_invalidation_vma(tile->media_gt, in xe_vm_invalidate_vma()
3296 vma->tile_invalidated = vma->tile_mask; in xe_vm_invalidate_vma()
3321 mutex_lock(&vm->snap_mutex); in xe_vm_snapshot_capture()
3322 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3323 if (gpuva->flags & XE_VMA_DUMPABLE) in xe_vm_snapshot_capture()
3330 snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV); in xe_vm_snapshot_capture()
3334 snap->num_snaps = num_snaps; in xe_vm_snapshot_capture()
3336 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3338 struct xe_bo *bo = vma->gpuva.gem.obj ? in xe_vm_snapshot_capture()
3339 gem_to_xe_bo(vma->gpuva.gem.obj) : NULL; in xe_vm_snapshot_capture()
3341 if (!(gpuva->flags & XE_VMA_DUMPABLE)) in xe_vm_snapshot_capture()
3344 snap->snap[i].ofs = xe_vma_start(vma); in xe_vm_snapshot_capture()
3345 snap->snap[i].len = xe_vma_size(vma); in xe_vm_snapshot_capture()
3347 snap->snap[i].bo = xe_bo_get(bo); in xe_vm_snapshot_capture()
3348 snap->snap[i].bo_ofs = xe_vma_bo_offset(vma); in xe_vm_snapshot_capture()
3351 to_userptr_vma(vma)->userptr.notifier.mm; in xe_vm_snapshot_capture()
3354 snap->snap[i].mm = mm; in xe_vm_snapshot_capture()
3356 snap->snap[i].data = ERR_PTR(-EFAULT); in xe_vm_snapshot_capture()
3358 snap->snap[i].bo_ofs = xe_vma_userptr(vma); in xe_vm_snapshot_capture()
3360 snap->snap[i].data = ERR_PTR(-ENOENT); in xe_vm_snapshot_capture()
3366 mutex_unlock(&vm->snap_mutex); in xe_vm_snapshot_capture()
3375 for (int i = 0; i < snap->num_snaps; i++) { in xe_vm_snapshot_capture_delayed()
3376 struct xe_bo *bo = snap->snap[i].bo; in xe_vm_snapshot_capture_delayed()
3379 if (IS_ERR(snap->snap[i].data)) in xe_vm_snapshot_capture_delayed()
3382 snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER); in xe_vm_snapshot_capture_delayed()
3383 if (!snap->snap[i].data) { in xe_vm_snapshot_capture_delayed()
3384 snap->snap[i].data = ERR_PTR(-ENOMEM); in xe_vm_snapshot_capture_delayed()
3389 err = xe_bo_read(bo, snap->snap[i].bo_ofs, in xe_vm_snapshot_capture_delayed()
3390 snap->snap[i].data, snap->snap[i].len); in xe_vm_snapshot_capture_delayed()
3392 void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs; in xe_vm_snapshot_capture_delayed()
3394 kthread_use_mm(snap->snap[i].mm); in xe_vm_snapshot_capture_delayed()
3395 if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len)) in xe_vm_snapshot_capture_delayed()
3398 err = -EFAULT; in xe_vm_snapshot_capture_delayed()
3399 kthread_unuse_mm(snap->snap[i].mm); in xe_vm_snapshot_capture_delayed()
3401 mmput(snap->snap[i].mm); in xe_vm_snapshot_capture_delayed()
3402 snap->snap[i].mm = NULL; in xe_vm_snapshot_capture_delayed()
3406 kvfree(snap->snap[i].data); in xe_vm_snapshot_capture_delayed()
3407 snap->snap[i].data = ERR_PTR(err); in xe_vm_snapshot_capture_delayed()
3412 snap->snap[i].bo = NULL; in xe_vm_snapshot_capture_delayed()
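
Copying from a userptr in the delayed capture above relies on temporarily adopting the captured mm from the worker thread. A generic sketch of that pattern with placeholder names, assuming the mm reference was taken earlier with mmget_not_zero():

	kthread_use_mm(mm);
	if (copy_from_user(buf, uptr, len))
		err = -EFAULT;
	kthread_unuse_mm(mm);
	mmput(mm);	/* drop the reference taken at capture time */
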
3425 for (i = 0; i < snap->num_snaps; i++) { in xe_vm_snapshot_print()
3426 drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len); in xe_vm_snapshot_print()
3428 if (IS_ERR(snap->snap[i].data)) { in xe_vm_snapshot_print()
3429 drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs, in xe_vm_snapshot_print()
3430 PTR_ERR(snap->snap[i].data)); in xe_vm_snapshot_print()
3434 drm_printf(p, "[%llx].data: ", snap->snap[i].ofs); in xe_vm_snapshot_print()
3436 for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) { in xe_vm_snapshot_print()
3437 u32 *val = snap->snap[i].data + j; in xe_vm_snapshot_print()
3454 for (i = 0; i < snap->num_snaps; i++) { in xe_vm_snapshot_free()
3455 if (!IS_ERR(snap->snap[i].data)) in xe_vm_snapshot_free()
3456 kvfree(snap->snap[i].data); in xe_vm_snapshot_free()
3457 xe_bo_put(snap->snap[i].bo); in xe_vm_snapshot_free()
3458 if (snap->snap[i].mm) in xe_vm_snapshot_free()
3459 mmput(snap->snap[i].mm); in xe_vm_snapshot_free()