Lines Matching +full:0 +full:- +full:job +full:- +full:ring

1 // SPDX-License-Identifier: MIT
30 * - Passing in a list of BOs which are read / written to, creating implicit syncs
31 * - Binding at exec time
32 * - Flow controlling the ring at exec time
35 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
36 * separate operations, and using the DRM scheduler to flow control the ring.
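As a concrete illustration of the simplified submission model described above, a userspace exec reduces to filling struct drm_xe_exec (which has no BO list field) and issuing the exec ioctl. The following is only a sketch: the helper name, header path and omission of error handling are assumptions of this example, not part of the file being listed; the struct fields themselves match the args-> accesses shown further down.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>		/* uapi header providing struct drm_xe_exec */

/* Illustrative helper (hypothetical name): submit one batch buffer on an
 * already-created exec queue. Note what is absent compared to older
 * drivers: no BO / relocation list and no bind step -- mappings come from
 * separate VM_BIND operations, and cross-BO ordering is expressed through
 * the syncs array or dma-buf implicit sync. */
static int submit_batch(int fd, uint32_t exec_queue_id, uint64_t batch_addr,
			struct drm_xe_sync *syncs, uint32_t num_syncs)
{
	struct drm_xe_exec exec;

	memset(&exec, 0, sizeof(exec));
	exec.exec_queue_id = exec_queue_id;
	exec.num_syncs = num_syncs;
	exec.syncs = (uint64_t)(uintptr_t)syncs;
	exec.address = batch_addr;	/* GPU VA of the batch buffer */
	exec.num_batch_buffer = 1;	/* must match the exec queue width */

	return ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
}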
60 * Rebinds / dma-resv usage applies to non-compute mode VMs only, as for compute
63 * There is no need to flow control the ring in the exec as we write the ring at
64 * submission time and set the DRM scheduler max job limit SIZE_OF_RING /
66 * ring is available.
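To make the arithmetic behind that limit concrete, here is a minimal sketch with assumed constant names and values (the real constants live in lines omitted from this listing): if every job can consume at most some worst-case number of ring bytes, then capping the scheduler's in-flight jobs at the ring size divided by that worst case guarantees the ring write at submission time always fits, so the exec path itself never has to block on ring space.

#define SIZE_OF_RING	(16 * 1024)	/* assumed ring size in bytes */
#define MAX_JOB_SIZE	256		/* assumed worst-case ring bytes per job */

/* Value handed to the DRM scheduler as its in-flight job limit: with at
 * most this many jobs outstanding, the ring can never overflow. */
static unsigned int ring_job_limit(void)
{
	return SIZE_OF_RING / MAX_JOB_SIZE;	/* 64 jobs with the values above */
}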
73 * .. code-block::
76 * Wait for any async VM bind passed as in-fences to start
77 * <----------------------------------------------------------------------|
80 * Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
82 * Create job                                                             |
83 * Rebind invalidated userptrs + evicted BOs (non-compute-mode)           |
84 * Add rebind fence dependency to job                                     |
85 * Add job VM dma-resv bookkeeping slot (non-compute mode)                |
86 * Add job to external BOs dma-resv write slots (non-compute mode)        |
87 * Check if any userptrs invalidated since pin ------ Drop locks ---------|
88 * Install in / out fences for job
89 * Submit job
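The long arrows on the right of the diagram form a retry loop: if any userptr was invalidated between being pinned and the check at the bottom, all locks are dropped and execution restarts from the wait/lock step. A compilable restatement of just that control flow, with hypothetical helper names standing in for the real steps, might look like:

#include <errno.h>

/* Hypothetical stand-ins for the real lock / rebind / job-creation steps. */
static int lock_vm_and_external_bos(void) { return 0; }
static int validate_rebind_and_create_job(void) { return 0; }
static void drop_locks(void) { }

static int exec_flow_skeleton(void)
{
	int err;

	do {
		err = lock_vm_and_external_bos();
		if (err)
			break;
		err = validate_rebind_and_create_job();
		if (err == -EAGAIN)
			drop_locks();	/* userptr invalidated since pin: loop back */
	} while (err == -EAGAIN);

	return err;	/* 0: fences installed and job submitted */
}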
99 struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm); in xe_exec_fn()
101 /* The fence slot added here is intended for the exec sched job. */ in xe_exec_fn()
102 return xe_vm_validate_rebind(vm, &vm_exec->exec, 1); in xe_exec_fn()
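For context on how a callback like xe_exec_fn() is consumed (the actual wiring sits in lines omitted from this listing), the GPUVM helper locks the VM's dma-resv and all external BOs, reserving fence slots on each, and invokes the extra.fn hook from inside that locking loop. A minimal sketch of the pattern follows; the flag choice, fence count and error label are assumptions of this example:

	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm->gpuvm,			/* as in the matched line 240 below */
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,	/* assumed locking flags */
		.num_fences = 1,			/* at least one slot for the exec job fence */
		.extra.fn = xe_exec_fn,			/* validate / rebind hook shown above */
	};

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_unlock_list;			/* hypothetical error label */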
110 struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs); in xe_exec_ioctl()
111 u64 __user *addresses_user = u64_to_user_ptr(args->address); in xe_exec_ioctl()
117 u32 i, num_syncs, num_ufence = 0; in xe_exec_ioctl()
118 struct xe_sched_job *job; in xe_exec_ioctl() local
121 ktime_t end = 0; in xe_exec_ioctl()
122 int err = 0; in xe_exec_ioctl()
126 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_exec_ioctl()
127 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_exec_ioctl()
128 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_ioctl()
129 return -EINVAL; in xe_exec_ioctl()
131 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_exec_ioctl()
133 return -ENOENT; in xe_exec_ioctl()
135 if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) { in xe_exec_ioctl()
136 err = -EINVAL; in xe_exec_ioctl()
140 if (XE_IOCTL_DBG(xe, args->num_batch_buffer && in xe_exec_ioctl()
141 q->width != args->num_batch_buffer)) { in xe_exec_ioctl()
142 err = -EINVAL; in xe_exec_ioctl()
146 if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) { in xe_exec_ioctl()
147 err = -ECANCELED; in xe_exec_ioctl()
151 if (args->num_syncs) { in xe_exec_ioctl()
152 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL); in xe_exec_ioctl()
154 err = -ENOMEM; in xe_exec_ioctl()
159 vm = q->vm; in xe_exec_ioctl()
161 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { in xe_exec_ioctl()
165 SYNC_PARSE_FLAG_LR_MODE : 0)); in xe_exec_ioctl()
174 err = -EINVAL; in xe_exec_ioctl()
180 q->width); in xe_exec_ioctl()
182 err = -EFAULT; in xe_exec_ioctl()
187 group = q->hwe->hw_engine_group; in xe_exec_ioctl()
198 err = down_write_killable(&vm->lock); in xe_exec_ioctl()
202 err = down_read_interruptible(&vm->lock); in xe_exec_ioctl()
210 downgrade_write(&vm->lock); in xe_exec_ioctl()
216 if (!args->num_batch_buffer) { in xe_exec_ioctl()
230 for (i = 0; i < num_syncs; i++) in xe_exec_ioctl()
240 vm_exec.vm = &vm->gpuvm; in xe_exec_ioctl()
243 drm_exec_init(exec, vm_exec.flags, 0); in xe_exec_ioctl()
248 err = -EAGAIN; in xe_exec_ioctl()
253 if (xe_vm_is_closed_or_banned(q->vm)) { in xe_exec_ioctl()
254 drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n"); in xe_exec_ioctl()
255 err = -ECANCELED; in xe_exec_ioctl()
260 err = -EWOULDBLOCK; /* Aliased to -EAGAIN */ in xe_exec_ioctl()
265 job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ? in xe_exec_ioctl()
266 addresses : &args->address); in xe_exec_ioctl()
267 if (IS_ERR(job)) { in xe_exec_ioctl()
268 err = PTR_ERR(job); in xe_exec_ioctl()
274 err = xe_sched_job_add_deps(job, in xe_exec_ioctl()
281 for (i = 0; i < num_syncs && !err; i++) in xe_exec_ioctl()
282 err = xe_sync_entry_add_deps(&syncs[i], job); in xe_exec_ioctl()
287 err = xe_sched_job_last_fence_add_dep(job, vm); in xe_exec_ioctl()
291 err = down_read_interruptible(&vm->userptr.notifier_lock); in xe_exec_ioctl()
302 * the job and let the DRM scheduler / backend clean up the job. in xe_exec_ioctl()
304 xe_sched_job_arm(job); in xe_exec_ioctl()
306 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, in xe_exec_ioctl()
310 for (i = 0; i < num_syncs; i++) { in xe_exec_ioctl()
311 xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished); in xe_exec_ioctl()
312 xe_sched_job_init_user_fence(job, &syncs[i]); in xe_exec_ioctl()
316 q->ring_ops->emit_job(job); in xe_exec_ioctl()
318 xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished); in xe_exec_ioctl()
319 xe_sched_job_push(job); in xe_exec_ioctl()
323 spin_lock(&xe->ttm.lru_lock); in xe_exec_ioctl()
324 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in xe_exec_ioctl()
325 spin_unlock(&xe->ttm.lru_lock); in xe_exec_ioctl()
333 up_read(&vm->userptr.notifier_lock); in xe_exec_ioctl()
336 xe_sched_job_put(job); in xe_exec_ioctl()
340 up_read(&vm->lock); in xe_exec_ioctl()
341 if (err == -EAGAIN && !skip_retry) in xe_exec_ioctl()
347 while (num_syncs--) in xe_exec_ioctl()