Lines Matching full:vm

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}
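These two helpers simply forward to the underlying drm_gpuvm reference count. A minimal usage sketch, assuming the surrounding xe_vm.h context; the function name and the deferred-work scenario are illustrative, not part of the header:

/* Illustrative only: keep the VM alive across a deferred operation. */
static void example_use_vm(struct xe_vm *vm)
{
	struct xe_vm *ref = xe_vm_get(vm);	/* take an extra reference */

	/* ... use ref even after the caller's reference may be dropped ... */

	xe_vm_put(ref);				/* drop the reference when done */
}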
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);
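xe_vm_lock() takes the vm's reservation object (interruptibly when intr is true) and returns 0 on success; xe_vm_unlock() releases it. A hedged sketch of the expected call pattern, using only helpers from this header:

/* Sketch: lock the VM, do work that requires its reservation object held. */
static int example_locked_op(struct xe_vm *vm)
{
	int err;

	err = xe_vm_lock(vm, true);	/* interruptible lock */
	if (err)
		return err;

	xe_vm_assert_held(vm);		/* the vm's dma-resv is now held */
	/* ... operate on the VM's mappings / page tables ... */

	xe_vm_unlock(vm);
	return 0;
}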
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}
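A minimal sketch of rejecting work on a dead VM, assuming vm->lock is a rw_semaphore as the lockdep assertion above suggests; the function name and error code are illustrative:

/* Sketch only: bail out early if the VM is closed or banned. */
static int example_check_vm(struct xe_vm *vm)
{
	int err = 0;

	down_read(&vm->lock);
	if (xe_vm_is_closed_or_banned(vm))
		err = -ENOENT;
	up_read(&vm->lock);

	return err;
}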
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}
static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}
static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

void xe_vm_close_and_put(struct xe_vm *vm);
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}
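These predicates split long-running (LR) VMs into the two LR flavours: fault mode and preempt-fence mode. A hedged sketch of how a caller might branch on them; the dispatch function itself is illustrative:

/* Sketch only: pick a path based on the VM's execution mode. */
static void example_dispatch(struct xe_vm *vm)
{
	if (!xe_vm_in_lr_mode(vm)) {
		/* dma-fence mode: jobs complete via regular fences */
	} else if (xe_vm_in_fault_mode(vm)) {
		/* fault mode: mappings are populated on demand */
	} else {
		/* preempt-fence mode: the rebind worker may be used */
		xe_vm_queue_rebind_worker(vm);
	}
}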
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}
/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}
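Per the comment above, a submission path on a compute (preempt-fence) vm is expected to poke this helper after queuing work. A minimal sketch; the surrounding function is a placeholder for the real submission path:

/* Sketch: after a batch has been queued on one of the compute VM's exec
 * queues, re-enable the rebind machinery if it had been deactivated. */
static void example_after_submit(struct xe_vm *vm)
{
	/* ... batch submitted ... */
	xe_vm_reactivate_rebind(vm);
}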
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);
/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);
/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
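Helpers whose contract is "caller already holds the vm's dma-resv" typically open with this assertion. A short sketch; the helper name is illustrative:

/* Sketch: document and enforce the locking contract at the callee side. */
static void example_resv_locked_helper(struct xe_vm *vm)
{
	xe_vm_assert_held(vm);	/* expands to dma_resv_assert_held() */

	/* ... touch state protected by xe_vm_resv(vm) ... */
}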
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);