Lines Matching +full:sync +full:- +full:1
1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-fence-array.h>
38 mmdrop(ufence->mm); in user_fence_destroy()
44 kref_get(&ufence->refcount); in user_fence_get()
49 kref_put(&ufence->refcount, user_fence_destroy); in user_fence_put()
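The three fragments above (lines 38-49) are the user-fence lifetime helpers: a kref drives destruction, and the mm_struct grabbed at creation time is dropped again in the release callback. A minimal sketch of that pattern follows; the struct layout is reconstructed from the fields visible in this listing and is an assumption, not a verbatim copy of the driver.

/*
 * Sketch only: field set inferred from the fragments in this listing
 * (addr, value, mm, xe, refcount, cb, worker, signalled). The xe
 * driver's own headers (struct xe_device, XE_IOCTL_DBG, ...) are
 * assumed to be available in the sketches below.
 */
#include <linux/dma-fence.h>
#include <linux/kref.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct xe_user_fence {
        struct xe_device *xe;
        struct kref refcount;
        struct dma_fence_cb cb;
        struct work_struct worker;
        struct mm_struct *mm;
        u64 __user *addr;
        u64 value;
        int signalled;
};

static void user_fence_destroy(struct kref *kref)
{
        struct xe_user_fence *ufence =
                container_of(kref, struct xe_user_fence, refcount);

        /* Release the mm_struct pinned by mmgrab() at creation, then free. */
        mmdrop(ufence->mm);
        kfree(ufence);
}

static void user_fence_get(struct xe_user_fence *ufence)
{
        kref_get(&ufence->refcount);
}

static void user_fence_put(struct xe_user_fence *ufence)
{
        kref_put(&ufence->refcount, user_fence_destroy);
}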
60 return ERR_PTR(-EFAULT); in user_fence_create()
64 return ERR_PTR(-ENOMEM); in user_fence_create()
66 ufence->xe = xe; in user_fence_create()
67 kref_init(&ufence->refcount); in user_fence_create()
68 ufence->addr = ptr; in user_fence_create()
69 ufence->value = value; in user_fence_create()
70 ufence->mm = current->mm; in user_fence_create()
71 mmgrab(ufence->mm); in user_fence_create()
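user_fence_create() (lines 60-71) allocates the fence, initialises the kref, records the user address and the value to write, and pins current->mm with mmgrab() so the address space structure stays valid until the fence is destroyed. The sketch builds on the struct above; the pointer check behind the -EFAULT at line 60 is shown here as access_ok() and is an assumption.

static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
                                               u64 value)
{
        struct xe_user_fence *ufence;
        u64 __user *ptr = u64_to_user_ptr(addr);

        /* Assumed validation: the listing only shows the -EFAULT return. */
        if (!access_ok(ptr, sizeof(*ptr)))
                return ERR_PTR(-EFAULT);

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (!ufence)
                return ERR_PTR(-ENOMEM);

        ufence->xe = xe;
        kref_init(&ufence->refcount);
        ufence->addr = ptr;
        ufence->value = value;
        /* Pin the mm_struct (not the address space contents) until destroy. */
        ufence->mm = current->mm;
        mmgrab(ufence->mm);

        return ufence;
}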
80 if (mmget_not_zero(ufence->mm)) { in user_fence_worker()
81 kthread_use_mm(ufence->mm); in user_fence_worker()
82 if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value))) in user_fence_worker()
84 kthread_unuse_mm(ufence->mm); in user_fence_worker()
85 mmput(ufence->mm); in user_fence_worker()
87 drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n"); in user_fence_worker()
92 * to safely reuse the same ufence without encountering -EBUSY errors. in user_fence_worker()
94 WRITE_ONCE(ufence->signalled, 1); in user_fence_worker()
95 wake_up_all(&ufence->xe->ufence_wq); in user_fence_worker()
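The worker (lines 80-95) does the actual signalling from process context: it pins the address space with mmget_not_zero(), temporarily adopts it with kthread_use_mm() so copy_to_user() can store the 64-bit value at the user address, then publishes the signalled state with WRITE_ONCE() before waking waiters on the device's ufence_wq. In the sketch below the failure warning and the trailing user_fence_put() are assumptions; the put balances the reference taken when the fence callback is armed in xe_sync_entry_signal().

static void user_fence_worker(struct work_struct *w)
{
        struct xe_user_fence *ufence =
                container_of(w, struct xe_user_fence, worker);

        if (mmget_not_zero(ufence->mm)) {
                /* Borrow the owner's address space so copy_to_user() works. */
                kthread_use_mm(ufence->mm);
                if (copy_to_user(ufence->addr, &ufence->value,
                                 sizeof(ufence->value)))
                        drm_warn(&ufence->xe->drm, "copy_to_user() failed\n");
                kthread_unuse_mm(ufence->mm);
                mmput(ufence->mm);
        } else {
                drm_dbg(&ufence->xe->drm,
                        "mmget_not_zero() failed, ufence wasn't signaled\n");
        }

        /*
         * Publish the state before waking waiters so userspace can reuse
         * the same ufence without encountering -EBUSY.
         */
        WRITE_ONCE(ufence->signalled, 1);
        wake_up_all(&ufence->xe->ufence_wq);
        user_fence_put(ufence); /* assumed: drop the signalling reference */
}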
101 INIT_WORK(&ufence->worker, user_fence_worker); in kick_ufence()
102 queue_work(ufence->xe->ordered_wq, &ufence->worker); in kick_ufence()
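kick_ufence() (lines 101-102) defers that user-space write to the device's ordered workqueue, since copy_to_user() must not be called from dma-fence signalling context. The dma_fence_put() in this sketch is an assumption that balances a reference taken before the callback was armed.

static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
        INIT_WORK(&ufence->worker, user_fence_worker);
        queue_work(ufence->xe->ordered_wq, &ufence->worker);
        dma_fence_put(fence);   /* assumed reference balancing */
}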
114 struct xe_sync_entry *sync, in xe_sync_entry_parse() argument
126 return -EFAULT; in xe_sync_entry_parse()
129 XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1])) in xe_sync_entry_parse()
130 return -EINVAL; in xe_sync_entry_parse()
136 return -EOPNOTSUPP; in xe_sync_entry_parse()
139 return -EINVAL; in xe_sync_entry_parse()
141 sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle); in xe_sync_entry_parse()
142 if (XE_IOCTL_DBG(xe, !sync->syncobj)) in xe_sync_entry_parse()
143 return -ENOENT; in xe_sync_entry_parse()
146 sync->fence = drm_syncobj_fence_get(sync->syncobj); in xe_sync_entry_parse()
147 if (XE_IOCTL_DBG(xe, !sync->fence)) in xe_sync_entry_parse()
148 return -EINVAL; in xe_sync_entry_parse()
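Lines 114-148 are the front half of xe_sync_entry_parse(): copy the drm_xe_sync argument in from userspace, reject unknown flags and non-zero reserved fields, then, for DRM_XE_SYNC_TYPE_SYNCOBJ, resolve the handle and, when the entry is a wait rather than a signal, take the syncobj's current fence. The sketch condenses this into a hypothetical parse_syncobj() helper; the xe_sync_entry layout and the exact flag mask are reconstructed from the listing and are assumptions.

/* Field set inferred from this listing; an assumption, not the real header. */
struct xe_sync_entry {
        struct drm_syncobj *syncobj;
        struct dma_fence *fence;
        struct dma_fence_chain *chain_fence;
        struct xe_user_fence *ufence;
        u64 addr;
        u64 timeline_value;
        u32 type;
        u32 flags;
};

static int parse_syncobj(struct xe_device *xe, struct xe_file *xef,
                         struct xe_sync_entry *sync,
                         struct drm_xe_sync __user *sync_user)
{
        struct drm_xe_sync sync_in;

        if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
                return -EFAULT;

        /* Assumed mask: DRM_XE_SYNC_FLAG_SIGNAL as the only valid flag. */
        if (XE_IOCTL_DBG(xe, sync_in.flags & ~DRM_XE_SYNC_FLAG_SIGNAL) ||
            XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
                return -EINVAL;

        /* DRM_XE_SYNC_TYPE_SYNCOBJ: resolve the handle... */
        sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
        if (XE_IOCTL_DBG(xe, !sync->syncobj))
                return -ENOENT;

        /* ...and, for an in-fence (wait), grab its current fence. */
        if (!(sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL)) {
                sync->fence = drm_syncobj_fence_get(sync->syncobj);
                if (XE_IOCTL_DBG(xe, !sync->fence))
                        return -EINVAL;
        }

        return 0;
}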
154 return -EOPNOTSUPP; in xe_sync_entry_parse()
157 return -EINVAL; in xe_sync_entry_parse()
160 return -EINVAL; in xe_sync_entry_parse()
162 sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle); in xe_sync_entry_parse()
163 if (XE_IOCTL_DBG(xe, !sync->syncobj)) in xe_sync_entry_parse()
164 return -ENOENT; in xe_sync_entry_parse()
167 sync->chain_fence = dma_fence_chain_alloc(); in xe_sync_entry_parse()
168 if (!sync->chain_fence) in xe_sync_entry_parse()
169 return -ENOMEM; in xe_sync_entry_parse()
171 sync->fence = drm_syncobj_fence_get(sync->syncobj); in xe_sync_entry_parse()
172 if (XE_IOCTL_DBG(xe, !sync->fence)) in xe_sync_entry_parse()
173 return -EINVAL; in xe_sync_entry_parse()
175 err = dma_fence_chain_find_seqno(&sync->fence, in xe_sync_entry_parse()
184 return -EOPNOTSUPP; in xe_sync_entry_parse()
187 return -EOPNOTSUPP; in xe_sync_entry_parse()
190 return -EINVAL; in xe_sync_entry_parse()
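Lines 162-175 are the timeline-syncobj branch. For a signal, the dma_fence_chain node is pre-allocated so that adding the timeline point at signal time cannot fail with -ENOMEM; for a wait, the current fence is fetched and dma_fence_chain_find_seqno() advances it to the requested point. Condensed into a hypothetical parse_timeline_syncobj() helper for illustration.

static int parse_timeline_syncobj(struct xe_device *xe, struct xe_file *xef,
                                  struct xe_sync_entry *sync,
                                  struct drm_xe_sync *sync_in, bool signal)
{
        int err;

        sync->syncobj = drm_syncobj_find(xef->drm, sync_in->handle);
        if (XE_IOCTL_DBG(xe, !sync->syncobj))
                return -ENOENT;

        if (signal) {
                /* Pre-allocate so drm_syncobj_add_point() cannot fail later. */
                sync->chain_fence = dma_fence_chain_alloc();
                if (!sync->chain_fence)
                        return -ENOMEM;
        } else {
                sync->fence = drm_syncobj_fence_get(sync->syncobj);
                if (XE_IOCTL_DBG(xe, !sync->fence))
                        return -EINVAL;

                /* Walk the chain to the requested timeline point. */
                err = dma_fence_chain_find_seqno(&sync->fence,
                                                 sync_in->timeline_value);
                if (err)
                        return err;
        }

        return 0;
}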
193 sync->addr = sync_in.addr; in xe_sync_entry_parse()
195 sync->ufence = user_fence_create(xe, sync_in.addr, in xe_sync_entry_parse()
197 if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence))) in xe_sync_entry_parse()
198 return PTR_ERR(sync->ufence); in xe_sync_entry_parse()
204 return -EINVAL; in xe_sync_entry_parse()
207 sync->type = sync_in.type; in xe_sync_entry_parse()
208 sync->flags = sync_in.flags; in xe_sync_entry_parse()
209 sync->timeline_value = sync_in.timeline_value; in xe_sync_entry_parse()
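Lines 193-209 handle DRM_XE_SYNC_TYPE_USER_FENCE and then record type, flags and timeline_value on the entry for later use by xe_sync_entry_add_deps() and xe_sync_entry_signal(). The hypothetical parse_user_fence() helper below assumes timeline_value is the value written to the user address on signal; in the driver the trailing field copy sits in xe_sync_entry_parse() itself.

static int parse_user_fence(struct xe_device *xe, struct xe_sync_entry *sync,
                            struct drm_xe_sync *sync_in)
{
        /* Remember the user address and build the CPU-side fence object. */
        sync->addr = sync_in->addr;
        sync->ufence = user_fence_create(xe, sync_in->addr,
                                         sync_in->timeline_value);
        if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
                return PTR_ERR(sync->ufence);

        /* Common fields kept for the add_deps/signal steps. */
        sync->type = sync_in->type;
        sync->flags = sync_in->flags;
        sync->timeline_value = sync_in->timeline_value;

        return 0;
}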
214 int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job) in xe_sync_entry_add_deps() argument
216 if (sync->fence) in xe_sync_entry_add_deps()
217 return drm_sched_job_add_dependency(&job->drm, in xe_sync_entry_add_deps()
218 dma_fence_get(sync->fence)); in xe_sync_entry_add_deps()
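xe_sync_entry_add_deps() (lines 214-218) turns a parsed in-fence into a scheduler dependency of the job; drm_sched takes over the extra reference obtained with dma_fence_get(). Only the trailing return, truncated from the listing, is added here.

int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
        if (sync->fence)
                /* The scheduler consumes the reference taken here. */
                return drm_sched_job_add_dependency(&job->drm,
                                                    dma_fence_get(sync->fence));

        return 0;
}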
223 void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence) in xe_sync_entry_signal() argument
225 if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL)) in xe_sync_entry_signal()
228 if (sync->chain_fence) { in xe_sync_entry_signal()
229 drm_syncobj_add_point(sync->syncobj, sync->chain_fence, in xe_sync_entry_signal()
230 fence, sync->timeline_value); in xe_sync_entry_signal()
235 sync->chain_fence = NULL; in xe_sync_entry_signal()
236 } else if (sync->syncobj) { in xe_sync_entry_signal()
237 drm_syncobj_replace_fence(sync->syncobj, fence); in xe_sync_entry_signal()
238 } else if (sync->ufence) { in xe_sync_entry_signal()
242 user_fence_get(sync->ufence); in xe_sync_entry_signal()
243 err = dma_fence_add_callback(fence, &sync->ufence->cb, in xe_sync_entry_signal()
245 if (err == -ENOENT) { in xe_sync_entry_signal()
246 kick_ufence(sync->ufence, fence); in xe_sync_entry_signal()
249 user_fence_put(sync->ufence); in xe_sync_entry_signal()
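xe_sync_entry_signal() (lines 223-249) is the out-fence side: a pre-allocated chain node becomes a new timeline point (ownership moves to the syncobj, so chain_fence is cleared), a plain syncobj simply has its fence replaced, and a user fence arms a dma-fence callback, falling back to kick_ufence() when the fence has already signalled and dma_fence_add_callback() returns -ENOENT. In the sketch, user_fence_cb(), the extra dma_fence_get() and the error-path cleanup are assumptions kept consistent with the reference handling in the earlier sketches.

static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        /* Assumed callback: hand off to the worker once the fence signals. */
        kick_ufence(container_of(cb, struct xe_user_fence, cb), fence);
}

void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
{
        if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
                return;

        if (sync->chain_fence) {
                drm_syncobj_add_point(sync->syncobj, sync->chain_fence,
                                      fence, sync->timeline_value);
                /* Ownership of the chain node moved to the timeline. */
                sync->chain_fence = NULL;
        } else if (sync->syncobj) {
                drm_syncobj_replace_fence(sync->syncobj, fence);
        } else if (sync->ufence) {
                int err;

                dma_fence_get(fence);   /* assumed: kept until kick_ufence() */
                user_fence_get(sync->ufence);
                err = dma_fence_add_callback(fence, &sync->ufence->cb,
                                             user_fence_cb);
                if (err == -ENOENT) {
                        /* Already signalled: run the worker path directly. */
                        kick_ufence(sync->ufence, fence);
                } else if (err) {
                        /* Assumed cleanup when the callback cannot be armed. */
                        user_fence_put(sync->ufence);
                        dma_fence_put(fence);
                }
        }
}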
255 void xe_sync_entry_cleanup(struct xe_sync_entry *sync) in xe_sync_entry_cleanup() argument
257 if (sync->syncobj) in xe_sync_entry_cleanup()
258 drm_syncobj_put(sync->syncobj); in xe_sync_entry_cleanup()
259 dma_fence_put(sync->fence); in xe_sync_entry_cleanup()
260 dma_fence_chain_free(sync->chain_fence); in xe_sync_entry_cleanup()
261 if (sync->ufence) in xe_sync_entry_cleanup()
262 user_fence_put(sync->ufence); in xe_sync_entry_cleanup()
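xe_sync_entry_cleanup() (lines 255-262) releases whatever the parse step created: the syncobj reference, any in-fence, an unused chain node and the user fence.

void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
{
        if (sync->syncobj)
                drm_syncobj_put(sync->syncobj);
        dma_fence_put(sync->fence);
        dma_fence_chain_free(sync->chain_fence);
        if (sync->ufence)
                user_fence_put(sync->ufence);
}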
266 * xe_sync_in_fence_get() - Get a fence from syncs, exec queue, and VM
267 * @sync: input syncs
272 * Get a fence from syncs, exec queue, and VM. If syncs contain in-fences create
273 * and return a composite fence of all in-fences + last fence. If no in-fences
277 * Return: fence on success, ERR_PTR(-ENOMEM) on failure
280 xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, in xe_sync_in_fence_get() argument
288 lockdep_assert_held(&vm->lock); in xe_sync_in_fence_get()
290 /* Count in-fences */ in xe_sync_in_fence_get()
292 if (sync[i].fence) { in xe_sync_in_fence_get()
294 fence = sync[i].fence; in xe_sync_in_fence_get()
305 fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL); in xe_sync_in_fence_get()
307 return ERR_PTR(-ENOMEM); in xe_sync_in_fence_get()
309 if (sync[i].fence) { in xe_sync_in_fence_get()
310 dma_fence_get(sync[i].fence); in xe_sync_in_fence_get()
311 fences[current_fence++] = sync[i].fence; in xe_sync_in_fence_get()
316 vm->composite_fence_ctx, in xe_sync_in_fence_get()
317 vm->composite_fence_seqno++, in xe_sync_in_fence_get()
320 --vm->composite_fence_seqno; in xe_sync_in_fence_get()
324 return &cf->base; in xe_sync_in_fence_get()
328 dma_fence_put(fences[--current_fence]); in xe_sync_in_fence_get()
332 return ERR_PTR(-ENOMEM); in xe_sync_in_fence_get()
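xe_sync_in_fence_get() (lines 266-332) folds every in-fence plus the exec queue/VM "last fence" into a dma_fence_array, using the VM's composite fence context and a post-incremented seqno that is rolled back if array creation fails. The sketch below shows the same gather/create/unwind pattern as a hypothetical build_composite_fence() helper; it leaves out the xe-specific last-fence lookup and the lockdep assertion on vm->lock.

static struct dma_fence *
build_composite_fence(struct xe_sync_entry *sync, int num_sync,
                      struct dma_fence *last, u64 ctx, u32 seqno)
{
        struct dma_fence **fences;
        struct dma_fence_array *cf;
        int i, n = 0;

        /* Count in-fences so the array can be sized exactly. */
        for (i = 0; i < num_sync; ++i)
                if (sync[i].fence)
                        ++n;

        fences = kmalloc_array(n + 1, sizeof(*fences), GFP_KERNEL);
        if (!fences)
                return ERR_PTR(-ENOMEM);

        n = 0;
        for (i = 0; i < num_sync; ++i) {
                if (sync[i].fence) {
                        /* The array owns one reference per fence. */
                        dma_fence_get(sync[i].fence);
                        fences[n++] = sync[i].fence;
                }
        }
        fences[n++] = dma_fence_get(last);

        /* On success the array takes ownership of @fences and the refs. */
        cf = dma_fence_array_create(n, fences, ctx, seqno, false);
        if (!cf) {
                while (n)
                        dma_fence_put(fences[--n]);
                kfree(fences);
                return ERR_PTR(-ENOMEM);
        }

        return &cf->base;
}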
336 * __xe_sync_ufence_get() - Get user fence from user fence
351 * xe_sync_ufence_get() - Get user fence from sync
352 * @sync: input sync
354 * Get a user fence reference from sync.
358 struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync) in xe_sync_ufence_get() argument
360 user_fence_get(sync->ufence); in xe_sync_ufence_get()
362 return sync->ufence; in xe_sync_ufence_get()
366 * xe_sync_ufence_put() - Put user fence reference
376 * xe_sync_ufence_get_status() - Get user fence status
379 * Return: 1 if signalled, 0 not signalled, <0 on error
383 return READ_ONCE(ufence->signalled); in xe_sync_ufence_get_status()
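The exported user-fence helpers at the end (lines 351-383) hand out and drop references and report status. Because the worker publishes signalled with WRITE_ONCE() and the status helper reads it with READ_ONCE(), callers can poll without extra locking.

struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
{
        user_fence_get(sync->ufence);

        return sync->ufence;
}

void xe_sync_ufence_put(struct xe_user_fence *ufence)
{
        user_fence_put(ufence);
}

int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
{
        /* 1 if signalled, 0 if not; matches the kernel-doc above. */
        return READ_ONCE(ufence->signalled);
}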