Lines Matching full:vm

42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)  in xe_vm_obj()  argument
44 return vm->gpuvm.r_obj; in xe_vm_obj()
53 * without the vm->userptr.notifier_lock held. There is no guarantee that the
69 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_userptr_pin_pages() local
70 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages()
72 lockdep_assert_held(&vm->lock); in xe_vma_userptr_pin_pages()
78 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting() argument
82 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
83 xe_vm_assert_held(vm); in preempt_fences_waiting()
85 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in preempt_fences_waiting()
104 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences() argument
107 lockdep_assert_held(&vm->lock); in alloc_preempt_fences()
108 xe_vm_assert_held(vm); in alloc_preempt_fences()
110 if (*count >= vm->preempt.num_exec_queues) in alloc_preempt_fences()
113 for (; *count < vm->preempt.num_exec_queues; ++(*count)) { in alloc_preempt_fences()
125 static int wait_for_existing_preempt_fences(struct xe_vm *vm) in wait_for_existing_preempt_fences() argument
129 xe_vm_assert_held(vm); in wait_for_existing_preempt_fences()
131 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in wait_for_existing_preempt_fences()
135 /* Only -ETIME on fence indicates VM needs to be killed */ in wait_for_existing_preempt_fences()
147 static bool xe_vm_is_idle(struct xe_vm *vm) in xe_vm_is_idle() argument
151 xe_vm_assert_held(vm); in xe_vm_is_idle()
152 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in xe_vm_is_idle()
160 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) in arm_preempt_fences() argument
165 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in arm_preempt_fences()
169 xe_assert(vm->xe, link != list); in arm_preempt_fences()
179 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) in add_preempt_fences() argument
186 if (!vm->preempt.num_exec_queues) in add_preempt_fences()
189 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues); in add_preempt_fences()
193 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in add_preempt_fences()
203 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, in resume_and_reinstall_preempt_fences() argument
208 lockdep_assert_held(&vm->lock); in resume_and_reinstall_preempt_fences()
209 xe_vm_assert_held(vm); in resume_and_reinstall_preempt_fences()
211 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in resume_and_reinstall_preempt_fences()
214 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence, in resume_and_reinstall_preempt_fences()
219 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_compute_exec_queue() argument
222 .vm = &vm->gpuvm, in xe_vm_add_compute_exec_queue()
231 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in xe_vm_add_compute_exec_queue()
233 down_write(&vm->lock); in xe_vm_add_compute_exec_queue()
245 list_add(&q->lr.link, &vm->preempt.exec_queues); in xe_vm_add_compute_exec_queue()
246 ++vm->preempt.num_exec_queues; in xe_vm_add_compute_exec_queue()
249 down_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
251 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, in xe_vm_add_compute_exec_queue()
255 * Check to see if a preemption on VM is in flight or userptr in xe_vm_add_compute_exec_queue()
257 * other preempt fences on the VM. in xe_vm_add_compute_exec_queue()
259 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm); in xe_vm_add_compute_exec_queue()
263 up_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
268 up_write(&vm->lock); in xe_vm_add_compute_exec_queue()
274 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
275 * @vm: The VM.
280 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_remove_compute_exec_queue() argument
282 if (!xe_vm_in_preempt_fence_mode(vm)) in xe_vm_remove_compute_exec_queue()
285 down_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
288 --vm->preempt.num_exec_queues; in xe_vm_remove_compute_exec_queue()
295 up_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
299 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
301 * @vm: The VM.
303 * This function checks for whether the VM has userptrs that need repinning,
309 int __xe_vm_userptr_needs_repin(struct xe_vm *vm) in __xe_vm_userptr_needs_repin() argument
311 lockdep_assert_held_read(&vm->userptr.notifier_lock); in __xe_vm_userptr_needs_repin()
313 return (list_empty(&vm->userptr.repin_list) && in __xe_vm_userptr_needs_repin()
314 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in __xe_vm_userptr_needs_repin()
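
A minimal caller sketch (illustrative only, not part of the matched lines; it assumes only the declarations shown above): sample the repin state under the userptr notifier lock, as the lockdep_assert_held_read() above requires, and treat -EAGAIN as "a repin pass is needed".

/* Hypothetical helper, for illustration only; not in the file. */
static bool vm_userptrs_need_repin(struct xe_vm *vm)
{
	int ret;

	down_read(&vm->userptr.notifier_lock);
	ret = __xe_vm_userptr_needs_repin(vm);	/* 0 or -EAGAIN */
	up_read(&vm->userptr.notifier_lock);

	return ret == -EAGAIN;
}
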
320 * xe_vm_kill() - VM Kill
321 * @vm: The VM.
322 * @unlocked: Flag indicating the VM's dma-resv is not held
324 * Kill the VM by setting the banned flag, indicating the VM is no longer available for
325 * use. If in preempt fence mode, also kill all exec queues attached to the VM.
327 void xe_vm_kill(struct xe_vm *vm, bool unlocked) in xe_vm_kill() argument
331 lockdep_assert_held(&vm->lock); in xe_vm_kill()
334 xe_vm_lock(vm, false); in xe_vm_kill()
336 vm->flags |= XE_VM_FLAG_BANNED; in xe_vm_kill()
337 trace_xe_vm_kill(vm); in xe_vm_kill()
339 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in xe_vm_kill()
343 xe_vm_unlock(vm); in xe_vm_kill()
345 /* TODO: Inform user the VM is banned */ in xe_vm_kill()
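
For reference, a hedged sketch of the error-path pattern (mirroring preempt_rebind_work_func() further down, which warns and then bans the VM); the wrapper name is hypothetical:

/* Hypothetical wrapper, illustrative only. */
static void vm_fail_and_kill(struct xe_vm *vm, int err)
{
	drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
	/* unlocked == true: the caller does not hold the VM's dma-resv here */
	xe_vm_kill(vm, true);
}
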
383 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_gpuvm_validate() local
387 lockdep_assert_held(&vm->lock); in xe_gpuvm_validate()
390 &vm->rebind_list); in xe_gpuvm_validate()
392 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false); in xe_gpuvm_validate()
402 * @vm: The vm for which we are rebinding.
415 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, in xe_vm_validate_rebind() argument
423 ret = drm_gpuvm_validate(&vm->gpuvm, exec); in xe_vm_validate_rebind()
427 ret = xe_vm_rebind(vm, false); in xe_vm_validate_rebind()
430 } while (!list_empty(&vm->gpuvm.evict.list)); in xe_vm_validate_rebind()
441 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, in xe_preempt_work_begin() argument
446 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
450 if (xe_vm_is_idle(vm)) { in xe_preempt_work_begin()
451 vm->preempt.rebind_deactivated = true; in xe_preempt_work_begin()
456 if (!preempt_fences_waiting(vm)) { in xe_preempt_work_begin()
461 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
465 err = wait_for_existing_preempt_fences(vm); in xe_preempt_work_begin()
475 return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues); in xe_preempt_work_begin()
480 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); in preempt_rebind_work_func() local
489 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in preempt_rebind_work_func()
490 trace_xe_vm_rebind_worker_enter(vm); in preempt_rebind_work_func()
492 down_write(&vm->lock); in preempt_rebind_work_func()
494 if (xe_vm_is_closed_or_banned(vm)) { in preempt_rebind_work_func()
495 up_write(&vm->lock); in preempt_rebind_work_func()
496 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
501 if (xe_vm_userptr_check_repin(vm)) { in preempt_rebind_work_func()
502 err = xe_vm_userptr_pin(vm); in preempt_rebind_work_func()
512 err = xe_preempt_work_begin(&exec, vm, &done); in preempt_rebind_work_func()
523 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count); in preempt_rebind_work_func()
527 err = xe_vm_rebind(vm, true); in preempt_rebind_work_func()
531 /* Wait on rebinds and munmap style VM unbinds */ in preempt_rebind_work_func()
532 wait = dma_resv_wait_timeout(xe_vm_resv(vm), in preempt_rebind_work_func()
545 down_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
546 if (retry_required(tries, vm)) { in preempt_rebind_work_func()
547 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
554 spin_lock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
555 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in preempt_rebind_work_func()
556 spin_unlock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
559 arm_preempt_fences(vm, &preempt_fences); in preempt_rebind_work_func()
560 resume_and_reinstall_preempt_fences(vm, &exec); in preempt_rebind_work_func()
561 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
567 trace_xe_vm_rebind_worker_retry(vm); in preempt_rebind_work_func()
572 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err); in preempt_rebind_work_func()
573 xe_vm_kill(vm, true); in preempt_rebind_work_func()
575 up_write(&vm->lock); in preempt_rebind_work_func()
579 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
582 static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma) in __vma_userptr_invalidate() argument
594 if (!xe_vm_in_fault_mode(vm) && in __vma_userptr_invalidate()
596 spin_lock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
598 &vm->userptr.invalidated); in __vma_userptr_invalidate()
599 spin_unlock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
606 * to the vm. in __vma_userptr_invalidate()
608 dma_resv_iter_begin(&cursor, xe_vm_resv(vm), in __vma_userptr_invalidate()
614 err = dma_resv_wait_timeout(xe_vm_resv(vm), in __vma_userptr_invalidate()
619 if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { in __vma_userptr_invalidate()
633 struct xe_vm *vm = xe_vma_vm(vma); in vma_userptr_invalidate() local
635 xe_assert(vm->xe, xe_vma_is_userptr(vma)); in vma_userptr_invalidate()
645 down_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
648 __vma_userptr_invalidate(vm, uvma); in vma_userptr_invalidate()
649 up_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
668 struct xe_vm *vm = xe_vma_vm(&uvma->vma); in xe_vma_userptr_force_invalidate() local
671 lockdep_assert_held(&vm->lock); in xe_vma_userptr_force_invalidate()
673 lockdep_assert_held(&vm->userptr.notifier_lock); in xe_vma_userptr_force_invalidate()
678 xe_vm_assert_held(vm); in xe_vma_userptr_force_invalidate()
683 __vma_userptr_invalidate(vm, uvma); in xe_vma_userptr_force_invalidate()
687 int xe_vm_userptr_pin(struct xe_vm *vm) in xe_vm_userptr_pin() argument
693 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm)); in xe_vm_userptr_pin()
694 lockdep_assert_held_write(&vm->lock); in xe_vm_userptr_pin()
697 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
698 xe_assert(vm->xe, list_empty(&vm->userptr.repin_list)); in xe_vm_userptr_pin()
699 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, in xe_vm_userptr_pin()
703 &vm->userptr.repin_list); in xe_vm_userptr_pin()
705 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
708 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
727 xe_vm_lock(vm, false); in xe_vm_userptr_pin()
728 dma_resv_wait_timeout(xe_vm_resv(vm), in xe_vm_userptr_pin()
733 xe_vm_unlock(vm); in xe_vm_userptr_pin()
742 &vm->rebind_list); in xe_vm_userptr_pin()
747 down_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
748 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
749 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
753 &vm->userptr.invalidated); in xe_vm_userptr_pin()
755 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
756 up_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
762 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
764 * @vm: The VM.
766 * This function does an advisory check for whether the VM has userptrs that
772 int xe_vm_userptr_check_repin(struct xe_vm *vm) in xe_vm_userptr_check_repin() argument
774 return (list_empty_careful(&vm->userptr.repin_list) && in xe_vm_userptr_check_repin()
775 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in xe_vm_userptr_check_repin()
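
A short sketch of how the advisory check pairs with the actual pin, following the rebind worker above (xe_vm_userptr_check_repin() is a lock-free peek via list_empty_careful(); xe_vm_userptr_pin() expects vm->lock held for write). The helper name is hypothetical:

/* Hypothetical helper, illustrative only. */
static int vm_repin_userptrs_if_needed(struct xe_vm *vm)
{
	lockdep_assert_held_write(&vm->lock);

	if (xe_vm_userptr_check_repin(vm))
		return xe_vm_userptr_pin(vm);

	return 0;
}
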
847 static struct dma_fence *ops_execute(struct xe_vm *vm,
849 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
853 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) in xe_vm_rebind() argument
861 lockdep_assert_held(&vm->lock); in xe_vm_rebind()
862 if ((xe_vm_in_lr_mode(vm) && !rebind_worker) || in xe_vm_rebind()
863 list_empty(&vm->rebind_list)) in xe_vm_rebind()
866 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_rebind()
870 xe_vm_assert_held(vm); in xe_vm_rebind()
871 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { in xe_vm_rebind()
872 xe_assert(vm->xe, vma->tile_present); in xe_vm_rebind()
889 fence = ops_execute(vm, &vops); in xe_vm_rebind()
894 list_for_each_entry_safe(vma, next, &vm->rebind_list, in xe_vm_rebind()
908 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask) in xe_vma_rebind() argument
917 lockdep_assert_held(&vm->lock); in xe_vma_rebind()
918 xe_vm_assert_held(vm); in xe_vma_rebind()
919 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vma_rebind()
921 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vma_rebind()
922 for_each_tile(tile, vm->xe, id) { in xe_vma_rebind()
938 fence = ops_execute(vm, &vops); in xe_vma_rebind()
962 static struct xe_vma *xe_vma_create(struct xe_vm *vm, in xe_vma_create() argument
975 xe_assert(vm->xe, start < end); in xe_vma_create()
976 xe_assert(vm->xe, end < vm->size); in xe_vma_create()
1003 vma->gpuva.vm = &vm->gpuvm; in xe_vma_create()
1011 for_each_tile(tile, vm->xe, id) in xe_vma_create()
1014 if (vm->xe->info.has_atomic_enable_pte_bit) in xe_vma_create()
1024 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); in xe_vma_create()
1058 xe_vm_get(vm); in xe_vma_create()
1066 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy_late() local
1087 xe_vm_put(vm); in xe_vma_destroy_late()
1089 xe_vm_put(vm); in xe_vma_destroy_late()
1116 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy() local
1118 lockdep_assert_held_write(&vm->lock); in xe_vma_destroy()
1119 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy)); in xe_vma_destroy()
1122 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); in xe_vma_destroy()
1124 spin_lock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1125 xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link)); in xe_vma_destroy()
1127 spin_unlock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1134 xe_vm_assert_held(vm); in xe_vma_destroy()
1151 * @vma: The vma for which we want to lock the vm resv and any attached in xe_vm_lock_vma()
1160 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_lock_vma() local
1164 XE_WARN_ON(!vm); in xe_vm_lock_vma()
1166 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in xe_vm_lock_vma()
1167 if (!err && bo && !bo->vm) in xe_vm_lock_vma()
1192 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) in xe_vm_find_overlapping_vma() argument
1196 lockdep_assert_held(&vm->lock); in xe_vm_find_overlapping_vma()
1198 if (xe_vm_is_closed_or_banned(vm)) in xe_vm_find_overlapping_vma()
1201 xe_assert(vm->xe, start + range <= vm->size); in xe_vm_find_overlapping_vma()
1203 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); in xe_vm_find_overlapping_vma()
1208 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_insert_vma() argument
1212 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_insert_vma()
1213 lockdep_assert_held(&vm->lock); in xe_vm_insert_vma()
1215 mutex_lock(&vm->snap_mutex); in xe_vm_insert_vma()
1216 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); in xe_vm_insert_vma()
1217 mutex_unlock(&vm->snap_mutex); in xe_vm_insert_vma()
1223 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_remove_vma() argument
1225 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_remove_vma()
1226 lockdep_assert_held(&vm->lock); in xe_vm_remove_vma()
1228 mutex_lock(&vm->snap_mutex); in xe_vm_remove_vma()
1230 mutex_unlock(&vm->snap_mutex); in xe_vm_remove_vma()
1231 if (vm->usm.last_fault_vma == vma) in xe_vm_remove_vma()
1232 vm->usm.last_fault_vma = NULL; in xe_vm_remove_vma()
1384 * given tile and vm.
1387 * @vm: vm to set up for.
1397 struct xe_vm *vm) in xe_vm_create_scratch() argument
1402 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { in xe_vm_create_scratch()
1403 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); in xe_vm_create_scratch()
1404 if (IS_ERR(vm->scratch_pt[id][i])) in xe_vm_create_scratch()
1405 return PTR_ERR(vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1407 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1414 static void xe_vm_free_scratch(struct xe_vm *vm) in xe_vm_free_scratch() argument
1419 if (!xe_vm_has_scratch(vm)) in xe_vm_free_scratch()
1422 for_each_tile(tile, vm->xe, id) { in xe_vm_free_scratch()
1425 if (!vm->pt_root[id]) in xe_vm_free_scratch()
1428 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i) in xe_vm_free_scratch()
1429 if (vm->scratch_pt[id][i]) in xe_vm_free_scratch()
1430 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL); in xe_vm_free_scratch()
1437 struct xe_vm *vm; in xe_vm_create() local
1442 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in xe_vm_create()
1443 if (!vm) in xe_vm_create()
1446 vm->xe = xe; in xe_vm_create()
1448 vm->size = 1ull << xe->info.va_bits; in xe_vm_create()
1450 vm->flags = flags; in xe_vm_create()
1452 init_rwsem(&vm->lock); in xe_vm_create()
1453 mutex_init(&vm->snap_mutex); in xe_vm_create()
1455 INIT_LIST_HEAD(&vm->rebind_list); in xe_vm_create()
1457 INIT_LIST_HEAD(&vm->userptr.repin_list); in xe_vm_create()
1458 INIT_LIST_HEAD(&vm->userptr.invalidated); in xe_vm_create()
1459 init_rwsem(&vm->userptr.notifier_lock); in xe_vm_create()
1460 spin_lock_init(&vm->userptr.invalidated_lock); in xe_vm_create()
1462 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in xe_vm_create()
1464 INIT_WORK(&vm->destroy_work, vm_destroy_work_func); in xe_vm_create()
1466 INIT_LIST_HEAD(&vm->preempt.exec_queues); in xe_vm_create()
1467 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */ in xe_vm_create()
1470 xe_range_fence_tree_init(&vm->rftree[id]); in xe_vm_create()
1472 vm->pt_ops = &xelp_pt_ops; in xe_vm_create()
1477 * scheduler drops all the references of it, hence protecting the VM in xe_vm_create()
1489 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm, in xe_vm_create()
1490 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops); in xe_vm_create()
1494 err = xe_vm_lock(vm, true); in xe_vm_create()
1499 vm->flags |= XE_VM_FLAG_64K; in xe_vm_create()
1506 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level); in xe_vm_create()
1507 if (IS_ERR(vm->pt_root[id])) { in xe_vm_create()
1508 err = PTR_ERR(vm->pt_root[id]); in xe_vm_create()
1509 vm->pt_root[id] = NULL; in xe_vm_create()
1514 if (xe_vm_has_scratch(vm)) { in xe_vm_create()
1516 if (!vm->pt_root[id]) in xe_vm_create()
1519 err = xe_vm_create_scratch(xe, tile, vm); in xe_vm_create()
1523 vm->batch_invalidate_tlb = true; in xe_vm_create()
1526 if (vm->flags & XE_VM_FLAG_LR_MODE) { in xe_vm_create()
1527 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); in xe_vm_create()
1528 vm->batch_invalidate_tlb = false; in xe_vm_create()
1533 if (!vm->pt_root[id]) in xe_vm_create()
1536 xe_pt_populate_empty(tile, vm, vm->pt_root[id]); in xe_vm_create()
1538 xe_vm_unlock(vm); in xe_vm_create()
1540 /* Kernel migration VM shouldn't have a circular loop.. */ in xe_vm_create()
1546 if (!vm->pt_root[id]) in xe_vm_create()
1554 vm->q[id] = q; in xe_vm_create()
1560 vm->composite_fence_ctx = dma_fence_context_alloc(1); in xe_vm_create()
1562 trace_xe_vm_create(vm); in xe_vm_create()
1564 return vm; in xe_vm_create()
1567 xe_vm_unlock(vm); in xe_vm_create()
1569 xe_vm_close_and_put(vm); in xe_vm_create()
1573 mutex_destroy(&vm->snap_mutex); in xe_vm_create()
1575 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_create()
1576 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in xe_vm_create()
1577 kfree(vm); in xe_vm_create()
1583 static void xe_vm_close(struct xe_vm *vm) in xe_vm_close() argument
1585 down_write(&vm->lock); in xe_vm_close()
1586 vm->size = 0; in xe_vm_close()
1587 up_write(&vm->lock); in xe_vm_close()
1590 void xe_vm_close_and_put(struct xe_vm *vm) in xe_vm_close_and_put() argument
1593 struct xe_device *xe = vm->xe; in xe_vm_close_and_put()
1599 xe_assert(xe, !vm->preempt.num_exec_queues); in xe_vm_close_and_put()
1601 xe_vm_close(vm); in xe_vm_close_and_put()
1602 if (xe_vm_in_preempt_fence_mode(vm)) in xe_vm_close_and_put()
1603 flush_work(&vm->preempt.rebind_work); in xe_vm_close_and_put()
1605 down_write(&vm->lock); in xe_vm_close_and_put()
1607 if (vm->q[id]) in xe_vm_close_and_put()
1608 xe_exec_queue_last_fence_put(vm->q[id], vm); in xe_vm_close_and_put()
1610 up_write(&vm->lock); in xe_vm_close_and_put()
1613 if (vm->q[id]) { in xe_vm_close_and_put()
1614 xe_exec_queue_kill(vm->q[id]); in xe_vm_close_and_put()
1615 xe_exec_queue_put(vm->q[id]); in xe_vm_close_and_put()
1616 vm->q[id] = NULL; in xe_vm_close_and_put()
1620 down_write(&vm->lock); in xe_vm_close_and_put()
1621 xe_vm_lock(vm, false); in xe_vm_close_and_put()
1622 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { in xe_vm_close_and_put()
1626 down_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1628 up_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1631 xe_vm_remove_vma(vm, vma); in xe_vm_close_and_put()
1634 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) { in xe_vm_close_and_put()
1645 * All vm operations will add shared fences to resv. in xe_vm_close_and_put()
1651 xe_vm_free_scratch(vm); in xe_vm_close_and_put()
1654 if (vm->pt_root[id]) { in xe_vm_close_and_put()
1655 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); in xe_vm_close_and_put()
1656 vm->pt_root[id] = NULL; in xe_vm_close_and_put()
1659 xe_vm_unlock(vm); in xe_vm_close_and_put()
1662 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL in xe_vm_close_and_put()
1672 up_write(&vm->lock); in xe_vm_close_and_put()
1675 if (vm->usm.asid) { in xe_vm_close_and_put()
1679 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION)); in xe_vm_close_and_put()
1681 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); in xe_vm_close_and_put()
1682 xe_assert(xe, lookup == vm); in xe_vm_close_and_put()
1687 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_close_and_put()
1689 xe_vm_put(vm); in xe_vm_close_and_put()
1694 struct xe_vm *vm = in vm_destroy_work_func() local
1696 struct xe_device *xe = vm->xe; in vm_destroy_work_func()
1701 xe_assert(xe, !vm->size); in vm_destroy_work_func()
1703 if (xe_vm_in_preempt_fence_mode(vm)) in vm_destroy_work_func()
1704 flush_work(&vm->preempt.rebind_work); in vm_destroy_work_func()
1706 mutex_destroy(&vm->snap_mutex); in vm_destroy_work_func()
1708 if (vm->flags & XE_VM_FLAG_LR_MODE) in vm_destroy_work_func()
1712 XE_WARN_ON(vm->pt_root[id]); in vm_destroy_work_func()
1714 trace_xe_vm_free(vm); in vm_destroy_work_func()
1716 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in vm_destroy_work_func()
1718 if (vm->xef) in vm_destroy_work_func()
1719 xe_file_put(vm->xef); in vm_destroy_work_func()
1721 kfree(vm); in vm_destroy_work_func()
1726 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); in xe_vm_free() local
1728 /* To destroy the VM we need to be able to sleep */ in xe_vm_free()
1729 queue_work(system_unbound_wq, &vm->destroy_work); in xe_vm_free()
1734 struct xe_vm *vm; in xe_vm_lookup() local
1736 mutex_lock(&xef->vm.lock); in xe_vm_lookup()
1737 vm = xa_load(&xef->vm.xa, id); in xe_vm_lookup()
1738 if (vm) in xe_vm_lookup()
1739 xe_vm_get(vm); in xe_vm_lookup()
1740 mutex_unlock(&xef->vm.lock); in xe_vm_lookup()
1742 return vm; in xe_vm_lookup()
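
A hedged usage sketch of the lookup/use/put pattern seen later in xe_vm_bind_ioctl(): the reference taken by xe_vm_lookup() must be dropped with xe_vm_put(). The function name and the u32 id type are assumptions for illustration:

/* Hypothetical ioctl-style caller, illustrative only. */
static int do_with_vm(struct xe_file *xef, u32 vm_id)
{
	struct xe_vm *vm;
	int err;

	vm = xe_vm_lookup(xef, vm_id);	/* takes a reference on success */
	if (!vm)
		return -ENOENT;

	err = down_write_killable(&vm->lock);
	if (!err) {
		/* ... operate on the VM ... */
		up_write(&vm->lock);
	}

	xe_vm_put(vm);	/* drop the lookup reference */
	return err;
}
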
1745 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) in xe_vm_pdp4_descriptor() argument
1747 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0, in xe_vm_pdp4_descriptor()
1752 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in to_wait_exec_queue() argument
1754 return q ? q : vm->q[0]; in to_wait_exec_queue()
1783 struct xe_vm *vm; in xe_vm_create_ioctl() local
1819 vm = xe_vm_create(xe, flags); in xe_vm_create_ioctl()
1820 if (IS_ERR(vm)) in xe_vm_create_ioctl()
1821 return PTR_ERR(vm); in xe_vm_create_ioctl()
1825 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, in xe_vm_create_ioctl()
1832 vm->usm.asid = asid; in xe_vm_create_ioctl()
1835 vm->xef = xe_file_get(xef); in xe_vm_create_ioctl()
1837 /* Record BO memory for VM pagetable created against client */ in xe_vm_create_ioctl()
1839 if (vm->pt_root[id]) in xe_vm_create_ioctl()
1840 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); in xe_vm_create_ioctl()
1844 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); in xe_vm_create_ioctl()
1848 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); in xe_vm_create_ioctl()
1857 xe_vm_close_and_put(vm); in xe_vm_create_ioctl()
1868 struct xe_vm *vm; in xe_vm_destroy_ioctl() local
1875 mutex_lock(&xef->vm.lock); in xe_vm_destroy_ioctl()
1876 vm = xa_load(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
1877 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_destroy_ioctl()
1879 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) in xe_vm_destroy_ioctl()
1882 xa_erase(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
1883 mutex_unlock(&xef->vm.lock); in xe_vm_destroy_ioctl()
1886 xe_vm_close_and_put(vm); in xe_vm_destroy_ioctl()
1897 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, in prep_vma_destroy() argument
1900 down_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
1902 up_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
1904 xe_vm_remove_vma(vm, vma); in prep_vma_destroy()
1962 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, in vm_bind_ioctl_ops_create() argument
1973 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_create()
1975 vm_dbg(&vm->xe->drm, in vm_bind_ioctl_ops_create()
1983 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, in vm_bind_ioctl_ops_create()
1987 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
1990 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
1993 xe_assert(vm->xe, bo); in vm_bind_ioctl_ops_create()
1999 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); in vm_bind_ioctl_ops_create()
2010 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_create()
2031 print_op(vm->xe, __op); in vm_bind_ioctl_ops_create()
2038 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, in new_vma() argument
2046 lockdep_assert_held_write(&vm->lock); in new_vma()
2052 if (!bo->vm) { in new_vma()
2053 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); in new_vma()
2066 vma = xe_vma_create(vm, bo, op->gem.offset, in new_vma()
2074 else if (!xe_vma_has_no_bo(vma) && !bo->vm) in new_vma()
2075 err = add_preempt_fences(vm, bo); in new_vma()
2082 prep_vma_destroy(vm, vma, false); in new_vma()
2122 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_commit() argument
2126 lockdep_assert_held_write(&vm->lock); in xe_vma_op_commit()
2130 err |= xe_vm_insert_vma(vm, op->map.vma); in xe_vma_op_commit()
2139 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va), in xe_vma_op_commit()
2144 err |= xe_vm_insert_vma(vm, op->remap.prev); in xe_vma_op_commit()
2154 err |= xe_vm_insert_vma(vm, op->remap.next); in xe_vma_op_commit()
2164 /* Adjust for partial unbind after removing VMA from VM */ in xe_vma_op_commit()
2172 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true); in xe_vma_op_commit()
2179 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_commit()
2185 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, in vm_bind_ioctl_ops_parse() argument
2188 struct xe_device *xe = vm->xe; in vm_bind_ioctl_ops_parse()
2194 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_parse()
2196 for_each_tile(tile, vm->xe, id) in vm_bind_ioctl_ops_parse()
2218 vma = new_vma(vm, &op->base.map, op->map.pat_index, in vm_bind_ioctl_ops_parse()
2224 if (op->map.immediate || !xe_vm_in_fault_mode(vm)) in vm_bind_ioctl_ops_parse()
2248 vma = new_vma(vm, op->base.remap.prev, in vm_bind_ioctl_ops_parse()
2287 vma = new_vma(vm, op->base.remap.next, in vm_bind_ioctl_ops_parse()
2331 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_parse()
2334 err = xe_vma_op_commit(vm, op); in vm_bind_ioctl_ops_parse()
2342 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, in xe_vma_op_unwind() argument
2346 lockdep_assert_held_write(&vm->lock); in xe_vma_op_unwind()
2351 prep_vma_destroy(vm, op->map.vma, post_commit); in xe_vma_op_unwind()
2360 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2362 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2364 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2373 prep_vma_destroy(vm, op->remap.prev, prev_post_commit); in xe_vma_op_unwind()
2377 prep_vma_destroy(vm, op->remap.next, next_post_commit); in xe_vma_op_unwind()
2381 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2383 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2385 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2393 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_unwind()
2397 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, in vm_bind_ioctl_ops_unwind() argument
2413 xe_vma_op_unwind(vm, op, in vm_bind_ioctl_ops_unwind()
2425 struct xe_vm *vm = xe_vma_vm(vma); in vma_lock_and_validate() local
2429 if (!bo->vm) in vma_lock_and_validate()
2432 err = xe_bo_validate(bo, vm, in vma_lock_and_validate()
2433 !xe_vm_in_preempt_fence_mode(vm)); in vma_lock_and_validate()
2454 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, in op_lock_and_prep() argument
2462 !xe_vm_in_fault_mode(vm) || in op_lock_and_prep()
2492 xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type)); in op_lock_and_prep()
2503 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_lock_and_prep()
2510 struct xe_vm *vm, in vm_bind_ioctl_ops_lock_and_prep() argument
2516 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in vm_bind_ioctl_ops_lock_and_prep()
2521 err = op_lock_and_prep(exec, vm, op); in vm_bind_ioctl_ops_lock_and_prep()
2528 vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) in vm_bind_ioctl_ops_lock_and_prep()
2567 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_ops_setup_tile_args() argument
2574 for_each_tile(tile, vm->xe, id) { in vm_ops_setup_tile_args()
2583 if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) in vm_ops_setup_tile_args()
2586 vops->pt_update_ops[id].q = vm->q[id]; in vm_ops_setup_tile_args()
2593 static struct dma_fence *ops_execute(struct xe_vm *vm, in ops_execute() argument
2603 number_tiles = vm_ops_setup_tile_args(vm, vops); in ops_execute()
2616 for_each_tile(tile, vm->xe, id) { in ops_execute()
2629 for_each_tile(tile, vm->xe, id) { in ops_execute()
2643 vm->composite_fence_ctx, in ops_execute()
2644 vm->composite_fence_seqno++, in ops_execute()
2647 --vm->composite_fence_seqno; in ops_execute()
2654 for_each_tile(tile, vm->xe, id) { in ops_execute()
2664 for_each_tile(tile, vm->xe, id) { in ops_execute()
2676 trace_xe_vm_ops_fail(vm); in ops_execute()
2687 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op, in op_add_ufence() argument
2706 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_add_ufence()
2710 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_fini() argument
2713 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q); in vm_bind_ioctl_ops_fini()
2721 op_add_ufence(vm, op, ufence); in vm_bind_ioctl_ops_fini()
2733 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); in vm_bind_ioctl_ops_fini()
2737 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, in vm_bind_ioctl_ops_execute() argument
2744 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_execute()
2749 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); in vm_bind_ioctl_ops_execute()
2754 fence = ops_execute(vm, vops); in vm_bind_ioctl_ops_execute()
2760 vm_bind_ioctl_ops_fini(vm, vops, fence); in vm_bind_ioctl_ops_execute()
2899 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, in vm_bind_ioctl_signal_fences() argument
2908 to_wait_exec_queue(vm, q), vm); in vm_bind_ioctl_signal_fences()
2915 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, in vm_bind_ioctl_signal_fences()
2922 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, in xe_vma_ops_init() argument
2928 vops->vm = vm; in xe_vma_ops_init()
2991 struct xe_vm *vm; in xe_vm_bind_ioctl() local
3017 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_bind_ioctl()
3018 if (XE_IOCTL_DBG(xe, !vm)) { in xe_vm_bind_ioctl()
3023 err = down_write_killable(&vm->lock); in xe_vm_bind_ioctl()
3027 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_vm_bind_ioctl()
3036 if (XE_IOCTL_DBG(xe, range > vm->size) || in xe_vm_bind_ioctl()
3037 XE_IOCTL_DBG(xe, addr > vm->size - range)) { in xe_vm_bind_ioctl()
3097 (xe_vm_in_lr_mode(vm) ? in xe_vm_bind_ioctl()
3118 xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3128 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset, in xe_vm_bind_ioctl()
3137 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); in xe_vm_bind_ioctl()
3144 vm->xe->vm_inject_error_position = in xe_vm_bind_ioctl()
3145 (vm->xe->vm_inject_error_position + 1) % in xe_vm_bind_ioctl()
3161 err = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_ioctl()
3165 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); in xe_vm_bind_ioctl()
3169 drm_gpuva_ops_free(&vm->gpuvm, ops[i]); in xe_vm_bind_ioctl()
3172 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3181 up_write(&vm->lock); in xe_vm_bind_ioctl()
3183 xe_vm_put(vm); in xe_vm_bind_ioctl()
3196 * xe_vm_lock() - Lock the vm's dma_resv object
3197 * @vm: The struct xe_vm whose lock is to be locked
3204 int xe_vm_lock(struct xe_vm *vm, bool intr) in xe_vm_lock() argument
3207 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); in xe_vm_lock()
3209 return dma_resv_lock(xe_vm_resv(vm), NULL); in xe_vm_lock()
3213 * xe_vm_unlock() - Unlock the vm's dma_resv object
3214 * @vm: The struct xe_vm whose lock is to be released.
3218 void xe_vm_unlock(struct xe_vm *vm) in xe_vm_unlock() argument
3220 dma_resv_unlock(xe_vm_resv(vm)); in xe_vm_unlock()
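
A minimal sketch of the lock/unlock pairing around the VM's dma-resv, as xe_vm_create() does above; the helper is hypothetical and assumes only the two functions just shown:

/* Hypothetical helper, illustrative only. */
static int with_vm_resv_locked(struct xe_vm *vm)
{
	int err;

	err = xe_vm_lock(vm, true);	/* intr == true: may fail if interrupted */
	if (err)
		return err;

	/* ... touch state protected by the VM's dma-resv ... */

	xe_vm_unlock(vm);
	return 0;
}
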
3312 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm) in xe_vm_snapshot_capture() argument
3318 if (!vm) in xe_vm_snapshot_capture()
3321 mutex_lock(&vm->snap_mutex); in xe_vm_snapshot_capture()
3322 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3336 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3366 mutex_unlock(&vm->snap_mutex); in xe_vm_snapshot_capture()