Lines matching "+full:0 +full:xe" in drivers/gpu/drm/xe/xe_exec_queue.c.
Matches are grouped below by enclosing function; the left-hand number is the
line number in the source file, and [xe: argument] / [xe: local] marks the
line where the matched identifier xe is declared in that function.

File-scope declarations:
   30	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
   36	static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
In __xe_exec_queue_alloc():
   50	static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,	[xe: argument]
  101		err = exec_queue_user_extensions(xe, q, extensions, 0);
In __xe_exec_queue_init():
  122	for (i = 0; i < q->width; ++i) {
  137	return 0;
  143	for (i = i - 1; i >= 0; --i)
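
Lines 122 and 143 are the two halves of the standard partial-initialization
unwind: initialize q->width entries forward, and on failure walk back over
only the entries that succeeded. A generic, self-contained sketch of the
idiom; init_one() and fini_one() are hypothetical stand-ins, not driver
functions:

    static int init_all(struct item *items, int n)
    {
        int i, err;

        for (i = 0; i < n; ++i) {          /* cf. line 122 */
            err = init_one(&items[i]);     /* hypothetical per-entry setup */
            if (err)
                goto unwind;
        }
        return 0;                          /* cf. line 137 */

    unwind:
        for (i = i - 1; i >= 0; --i)       /* cf. line 143 */
            fini_one(&items[i]);           /* hypothetical teardown */
        return err;
    }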

In xe_exec_queue_create():
  148	struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,	[xe: argument]
  156	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,

In xe_exec_queue_create_class():
  172	struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,	[xe: argument]
  179	u32 logical_mask = 0;
  195	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);

In the kernel-doc comment for xe_exec_queue_create_bind():
  200	 * @xe: Xe device.

In xe_exec_queue_create_bind():
  214	struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,	[xe: argument]
  223	if (xe->info.has_usm) {
  234	q = xe_exec_queue_create(xe, migrate_vm,
  238	q = xe_exec_queue_create_class(xe, gt, migrate_vm,

In xe_exec_queue_fini():
  275	for (i = 0; i < q->width; ++i)

In xe_exec_queue_device_get_max_priority():
  321	xe_exec_queue_device_get_max_priority(struct xe_device *xe)	[xe: argument]

In exec_queue_set_priority():
  327	static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,	[xe: argument]
  330	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
  333	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
  337	return 0;

In exec_queue_set_timeslice():
  392	static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,	[xe: argument]
  395	u32 min = 0, max = 0;
  405	return 0;

At file scope:
  408	typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,

In exec_queue_user_ext_set_property():
  417	static int exec_queue_user_ext_set_property(struct xe_device *xe,	[xe: argument]
  427	if (XE_IOCTL_DBG(xe, err))
  430	if (XE_IOCTL_DBG(xe, ext.property >=
  432	    XE_IOCTL_DBG(xe, ext.pad) ||
  433	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
  441	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
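
Lines 430-441 restrict creation-time properties to PRIORITY and TIMESLICE and
dispatch through exec_queue_set_property_funcs. A hedged userspace sketch of
requesting elevated priority by chaining a SET_PROPERTY extension onto queue
creation; names follow include/uapi/drm/xe_drm.h, and the concrete value is
an assumption, so verify against your headers:

    #include <stdint.h>
    #include <drm/xe_drm.h>

    /* One SET_PROPERTY extension, linked via drm_xe_exec_queue_create.extensions.
     * next_extension stays 0, ending the chain after a single node. */
    struct drm_xe_ext_set_property prio = {
        .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
        .property  = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
        .value     = 2, /* assumed to map to the highest priority; values
                         * above XE_EXEC_QUEUE_PRIORITY_HIGH are rejected at
                         * line 330, and line 333 further caps unprivileged
                         * callers at the device maximum */
    };
    struct drm_xe_exec_queue_create create = {
        .extensions = (uintptr_t)&prio,
        /* width, num_placements, vm_id, instances filled as in any create */
    };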

At file scope:
  444	typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,

In exec_queue_user_extensions():
  453	static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,	[xe: argument]
  461	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
  465	if (XE_IOCTL_DBG(xe, err))
  468	if (XE_IOCTL_DBG(xe, ext.pad) ||
  469	    XE_IOCTL_DBG(xe, ext.name >=
  475	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
  476	if (XE_IOCTL_DBG(xe, err))
  480	return exec_queue_user_extensions(xe, q, ext.next_extension,
  483	return 0;
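
Assembled, the fragments above form a bounded recursive walk of the
user-extension chain: copy in one struct drm_xe_user_extension, validate pad
and name, dispatch through exec_queue_user_extension_funcs, then recurse on
next_extension with ext_number incremented, giving up at MAX_USER_EXTENSIONS.
A condensed sketch of that shape, simplified from the driver (which also
hardens the table index, hence the idx at line 475):

    static int walk_extensions(struct xe_device *xe, struct xe_exec_queue *q,
                               u64 extensions, int ext_number)
    {
        struct drm_xe_user_extension ext;
        int err;

        if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
            return -E2BIG;                 /* cf. line 461 */

        err = copy_from_user(&ext, u64_to_user_ptr(extensions), sizeof(ext));
        if (XE_IOCTL_DBG(xe, err))
            return -EFAULT;                /* cf. line 465 */

        if (XE_IOCTL_DBG(xe, ext.pad) ||   /* cf. lines 468-469 */
            XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(exec_queue_user_extension_funcs)))
            return -EINVAL;

        err = exec_queue_user_extension_funcs[ext.name](xe, q, extensions);
        if (XE_IOCTL_DBG(xe, err))
            return err;                    /* cf. lines 475-476 */

        if (ext.next_extension)            /* tail-recurse, cf. line 480 */
            return walk_extensions(xe, q, ext.next_extension, ++ext_number);

        return 0;                          /* cf. line 483 */
    }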

In calc_validate_logical_mask():
  486	static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,	[xe: argument]
  494	u32 return_mask = 0, prev_mask;
  496	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
  498	return 0;
  500	for (i = 0; i < width; ++i) {
  501	u32 current_mask = 0;
  503	for (j = 0; j < num_placements; ++j) {
  508	hwe = xe_hw_engine_lookup(xe, eci[n]);
  509	if (XE_IOCTL_DBG(xe, !hwe))
  510	return 0;
  512	if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
  513	return 0;
  515	if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
  516	    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
  517	return 0;
  528	if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
  529	return 0;
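
The check at line 528 is what makes parallel placements well-formed: each
width slot's accepted placements form a bit mask over logical engine
instances, and every slot after the first must equal the previous slot's mask
shifted left by one, i.e. the slots must land on logically adjacent engines.
For example, with width 2, if slot 0 may run on logical instances 0 or 1
(mask 0b0011), slot 1 is only valid if it yields instances 1 or 2 (mask
0b0110); any other combination makes the function return 0, which the ioctl
then rejects at line 605.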

In xe_exec_queue_create_ioctl():
  540	struct xe_device *xe = to_xe_device(dev);	[xe: local]
  556	if (XE_IOCTL_DBG(xe, args->flags) ||
  557	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
  561	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
  567	if (XE_IOCTL_DBG(xe, err))
  570	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
  573	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
  574	if (XE_IOCTL_DBG(xe, args->width != 1) ||
  575	    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
  576	    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
  579	for_each_tile(tile, xe, id) {
  586	new = xe_exec_queue_create_bind(xe, tile, flags,
  594	if (id == 0)
  601	gt = xe_device_get_gt(xe, eci[0].gt_id);
  602	logical_mask = calc_validate_logical_mask(xe, gt, eci,
  605	if (XE_IOCTL_DBG(xe, !logical_mask))
  608	hwe = xe_hw_engine_lookup(xe, eci[0]);
  609	if (XE_IOCTL_DBG(xe, !hwe))
  613	if (XE_IOCTL_DBG(xe, !vm))
  622	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
  628	q = xe_exec_queue_create(xe, vm, logical_mask,
  629			args->width, hwe, 0,
  640	if (XE_IOCTL_DBG(xe, err))
  660	return 0;
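
For context, a minimal userspace caller of this ioctl: one render engine,
width 1, a single placement, so the logical-mask validation above
degenerates to a single engine lookup. A hypothetical helper under stated
assumptions (fd is an open Xe DRM device node, vm_id came from a prior
DRM_IOCTL_XE_VM_CREATE; layouts per include/uapi/drm/xe_drm.h):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int create_render_queue(int fd, uint32_t vm_id, uint32_t *queue_id)
    {
        struct drm_xe_engine_class_instance eci = {
            .engine_class    = DRM_XE_ENGINE_CLASS_RENDER,
            .engine_instance = 0,
            .gt_id           = 0,  /* must be < gt_count, checked at line 570 */
        };
        struct drm_xe_exec_queue_create create = {
            .width          = 1,
            .num_placements = 1,
            .vm_id          = vm_id,
            .instances      = (uintptr_t)&eci,
            /* flags and reserved[] stay zero, per lines 556-557 */
        };

        if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
            return -1;

        *queue_id = create.exec_queue_id;
        return 0;
    }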

In xe_exec_queue_get_property_ioctl():
  672	struct xe_device *xe = to_xe_device(dev);	[xe: local]
  678	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
  682	if (XE_IOCTL_DBG(xe, !q))
  688	ret = 0;

In xe_exec_queue_num_job_inflight():
  713	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;

In xe_exec_queue_ring_full():
  724	struct xe_lrc *lrc = q->lrc[0];

In xe_exec_queue_is_idle():
  749	for (i = 0; i < q->width; ++i) {
  758	return xe_lrc_seqno(q->lrc[0]) ==
  759		q->lrc[0]->fence_ctx.next_seqno - 1;
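
Lines 713 and 758-759 are the same seqno arithmetic from two angles:
fence_ctx.next_seqno is the next number to be assigned and xe_lrc_seqno()
the last one the hardware completed, so next_seqno - completed - 1 jobs
remain in flight. For example, next_seqno = 10 against a completed seqno of
6 leaves 10 - 6 - 1 = 3 jobs; the queue is idle exactly when the completed
seqno reaches next_seqno - 1 and that count drops to zero.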

In xe_exec_queue_update_run_ticks():
  772	struct xe_device *xe = gt_to_xe(q->gt);	[xe: local]
  784	/* Synchronize with unbind while holding the xe file open */
  785	if (!drm_dev_enter(&xe->drm, &idx))
  795	lrc = q->lrc[0];
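
The guard at line 785 is the generic DRM pattern for touching state that may
vanish on device unbind: drm_dev_enter() returns false once the device is
unplugged, and each success must be paired with drm_dev_exit(). A minimal
sketch; the function name is a stand-in, not the driver's:

    static void sample_run_ticks(struct xe_device *xe, struct xe_exec_queue *q)
    {
        int idx;

        /* Returns false once the device is gone, so the LRC access
         * below is skipped entirely rather than racing the unbind. */
        if (!drm_dev_enter(&xe->drm, &idx))
            return;

        /* read q->lrc[0] timestamps here, as at line 795 */

        drm_dev_exit(idx);
    }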

In xe_exec_queue_destroy_ioctl():
  828	struct xe_device *xe = to_xe_device(dev);	[xe: local]
  833	if (XE_IOCTL_DBG(xe, args->pad) ||
  834	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
  843	if (XE_IOCTL_DBG(xe, !q))
  854	return 0;
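
The userspace counterpart to the create sketch above, equally hypothetical;
pad and reserved[] must be zero, per the checks at lines 833-834:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int destroy_queue(int fd, uint32_t queue_id)
    {
        struct drm_xe_exec_queue_destroy destroy = {
            .exec_queue_id = queue_id, /* pad and reserved[] left zero */
        };

        return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
    }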

In xe_exec_queue_last_fence_test_dep():
  975	int err = 0;
  980		0 : -ETIME;