Lines Matching full:job in drivers/gpu/drm/xe/xe_sched_job.c
68 static void job_free(struct xe_sched_job *job) in job_free() argument
70 struct xe_exec_queue *q = job->q; in job_free()
73 kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ? in job_free()
74 xe_sched_job_parallel_slab : xe_sched_job_slab, job); in job_free()
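The slab selection above mirrors the allocation side. A minimal sketch of what the matching job_alloc() helper presumably looks like, inferred from the two caches named in job_free() rather than copied from the source:

	static struct xe_sched_job *job_alloc(bool parallel)
	{
		/* Use the same cache job_free() will later return the object to. */
		return kmem_cache_zalloc(parallel ? xe_sched_job_parallel_slab :
					 xe_sched_job_slab, GFP_KERNEL);
	}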
77 static struct xe_device *job_to_xe(struct xe_sched_job *job) in job_to_xe() argument
79 return gt_to_xe(job->q->gt); in job_to_xe()
83 static void xe_sched_job_free_fences(struct xe_sched_job *job) in xe_sched_job_free_fences() argument
87 for (i = 0; i < job->q->width; ++i) { in xe_sched_job_free_fences()
88 struct xe_job_ptrs *ptrs = &job->ptrs[i]; in xe_sched_job_free_fences()
100 struct xe_sched_job *job; in xe_sched_job_create() local
105 /* only a kernel context can submit a vm-less job */ in xe_sched_job_create()
108 job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration); in xe_sched_job_create()
109 if (!job) in xe_sched_job_create()
112 job->q = q; in xe_sched_job_create()
113 kref_init(&job->refcount); in xe_sched_job_create()
114 xe_exec_queue_get(job->q); in xe_sched_job_create()
116 err = drm_sched_job_init(&job->drm, q->entity, 1, NULL); in xe_sched_job_create()
128 job->ptrs[i].lrc_fence = fence; in xe_sched_job_create()
138 job->ptrs[i].chain_fence = chain; in xe_sched_job_create()
146 job->ptrs[i].batch_addr = batch_addr[i]; in xe_sched_job_create()
148 xe_pm_runtime_get_noresume(job_to_xe(job)); in xe_sched_job_create()
149 trace_xe_sched_job_create(job); in xe_sched_job_create()
150 return job; in xe_sched_job_create()
153 xe_sched_job_free_fences(job); in xe_sched_job_create()
154 drm_sched_job_cleanup(&job->drm); in xe_sched_job_create()
157 job_free(job); in xe_sched_job_create()
162 * xe_sched_job_destroy - Destroy XE schedule job
163 * @ref: reference to XE schedule job
165 * Called when ref == 0, drop a reference to job's xe_exec_queue + fence, cleanup in xe_sched_job_destroy()
166 * base DRM schedule job, and free memory for XE schedule job.
170 struct xe_sched_job *job = in xe_sched_job_destroy() local
172 struct xe_device *xe = job_to_xe(job); in xe_sched_job_destroy()
173 struct xe_exec_queue *q = job->q; in xe_sched_job_destroy()
175 xe_sched_job_free_fences(job); in xe_sched_job_destroy()
176 dma_fence_put(job->fence); in xe_sched_job_destroy()
177 drm_sched_job_cleanup(&job->drm); in xe_sched_job_destroy()
178 job_free(job); in xe_sched_job_destroy()
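xe_sched_job_destroy() is the kref release callback, so callers never invoke it directly; they drop their reference instead. A sketch of how the put side is presumably wired up in the header (treat the exact form as an assumption):

	static inline void xe_sched_job_put(struct xe_sched_job *job)
	{
		/* Runs xe_sched_job_destroy() once the last reference is dropped. */
		kref_put(&job->refcount, xe_sched_job_destroy);
	}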
198 void xe_sched_job_set_error(struct xe_sched_job *job, int error) in xe_sched_job_set_error() argument
200 if (xe_fence_set_error(job->fence, error)) in xe_sched_job_set_error()
203 if (dma_fence_is_chain(job->fence)) { in xe_sched_job_set_error()
206 dma_fence_chain_for_each(iter, job->fence) in xe_sched_job_set_error()
211 trace_xe_sched_job_set_error(job); in xe_sched_job_set_error()
213 dma_fence_enable_sw_signaling(job->fence); in xe_sched_job_set_error()
214 xe_hw_fence_irq_run(job->q->fence_irq); in xe_sched_job_set_error()
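For a parallel job, job->fence is a dma_fence_chain, so the error has to reach every per-LRC fence hanging off the chain. The chain branch elided from the match above presumably walks it roughly as below; dma_fence_set_error() stands in for whatever wrapper (xe_fence_set_error() in the lines above) the driver actually uses:

	static void job_chain_set_error(struct xe_sched_job *job, int error)
	{
		struct dma_fence *iter;

		/* Visit each link and set the error on the fence it contains. */
		dma_fence_chain_for_each(iter, job->fence)
			dma_fence_set_error(dma_fence_chain_contained(iter), error);
	}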
217 bool xe_sched_job_started(struct xe_sched_job *job) in xe_sched_job_started() argument
219 struct xe_lrc *lrc = job->q->lrc[0]; in xe_sched_job_started()
221 return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job), in xe_sched_job_started()
223 dma_fence_chain_contained(job->fence)->ops); in xe_sched_job_started()
226 bool xe_sched_job_completed(struct xe_sched_job *job) in xe_sched_job_completed() argument
228 struct xe_lrc *lrc = job->q->lrc[0]; in xe_sched_job_completed()
235 return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job), in xe_sched_job_completed()
237 dma_fence_chain_contained(job->fence)->ops); in xe_sched_job_completed()
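Both helpers above reduce to a wrap-safe comparison between the job's LRC seqno and a seqno read back from the LRC (apparently the start seqno for "started" and the completed seqno for "completed"). A conceptual sketch of that test for 32-bit seqnos; the real code delegates to __dma_fence_is_later() with the contained fence's ops so 64-bit seqnos are handled as well:

	/* A job has passed a given point once the seqno written back by the hardware
	 * is no earlier than the job's own LRC seqno (standard wrap-safe compare). */
	static bool job_seqno_passed(u32 hw_seqno, u32 job_seqno)
	{
		return (s32)(hw_seqno - job_seqno) >= 0;
	}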
240 void xe_sched_job_arm(struct xe_sched_job *job) in xe_sched_job_arm() argument
242 struct xe_exec_queue *q = job->q; in xe_sched_job_arm()
260 job->ring_ops_flush_tlb = true; in xe_sched_job_arm()
267 fence = job->ptrs[i].lrc_fence; in xe_sched_job_arm()
269 job->ptrs[i].lrc_fence = NULL; in xe_sched_job_arm()
271 job->lrc_seqno = fence->seqno; in xe_sched_job_arm()
274 xe_assert(gt_to_xe(q->gt), job->lrc_seqno == fence->seqno); in xe_sched_job_arm()
277 chain = job->ptrs[i - 1].chain_fence; in xe_sched_job_arm()
279 job->ptrs[i - 1].chain_fence = NULL; in xe_sched_job_arm()
283 job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */ in xe_sched_job_arm()
284 drm_sched_job_arm(&job->drm); in xe_sched_job_arm()
287 void xe_sched_job_push(struct xe_sched_job *job) in xe_sched_job_push() argument
289 xe_sched_job_get(job); in xe_sched_job_push()
290 trace_xe_sched_job_exec(job); in xe_sched_job_push()
291 drm_sched_entity_push_job(&job->drm); in xe_sched_job_push()
292 xe_sched_job_put(job); in xe_sched_job_push()
296 * xe_sched_job_last_fence_add_dep - Add last fence dependency to job
297 * @job: job to add the last fence dependency to
298 * @vm: virtual memory job belongs to
303 int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) in xe_sched_job_last_fence_add_dep() argument
307 fence = xe_exec_queue_last_fence_get(job->q, vm); in xe_sched_job_last_fence_add_dep()
309 return drm_sched_job_add_dependency(&job->drm, fence); in xe_sched_job_last_fence_add_dep()
313 * xe_sched_job_init_user_fence - Initialize user_fence for the job
314 * @job: job whose user_fence needs an init
317 void xe_sched_job_init_user_fence(struct xe_sched_job *job, in xe_sched_job_init_user_fence() argument
323 job->user_fence.used = true; in xe_sched_job_init_user_fence()
324 job->user_fence.addr = sync->addr; in xe_sched_job_init_user_fence()
325 job->user_fence.value = sync->timeline_value; in xe_sched_job_init_user_fence()
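xe_sched_job_init_user_fence() only records the user-fence address and timeline value taken from the sync entry; the write to that address happens later, on completion, outside the lines matched here. Purely as an illustration of the contract from the submitter's side (hypothetical names, and a busy-wait no real UMD would use):

	#include <stdint.h>

	/* Hypothetical userspace-side wait: once the job completes, the kernel writes
	 * the recorded timeline value to the 64-bit word at the address it was given,
	 * so waiting means watching that word reach the value. */
	static void wait_user_fence(const volatile uint64_t *fence_word, uint64_t value)
	{
		while (*fence_word < value)
			;
	}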
329 xe_sched_job_snapshot_capture(struct xe_sched_job *job) in xe_sched_job_snapshot_capture() argument
331 struct xe_exec_queue *q = job->q; in xe_sched_job_snapshot_capture()
344 xe_device_uncanonicalize_addr(xe, job->ptrs[i].batch_addr); in xe_sched_job_snapshot_capture()
367 int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv, in xe_sched_job_add_deps() argument
370 return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage); in xe_sched_job_add_deps()
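Taken together, the helpers matched in this file form the usual submission sequence. A rough sketch of how a caller might string them together, based only on the signatures visible above; locking, the choice of dma_resv usage, and error handling are simplified, and the driver's real call sites differ in detail:

	static int submit_one_job(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_resv *resv, u64 *batch_addr)
	{
		struct xe_sched_job *job;
		int err;

		job = xe_sched_job_create(q, batch_addr);
		if (IS_ERR(job))
			return PTR_ERR(job);

		/* Order behind existing fences on the reservation object ... */
		err = xe_sched_job_add_deps(job, resv, DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put;

		/* ... and behind the queue's last fence for this VM. */
		err = xe_sched_job_last_fence_add_dep(job, vm);
		if (err)
			goto err_put;

		xe_sched_job_arm(job);	/* assigns job->fence and the LRC seqno */
		xe_sched_job_push(job);	/* hands the job to the DRM scheduler */
		return 0;

	err_put:
		xe_sched_job_put(job);	/* drops the creation reference, freeing the job */
		return err;
	}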