
1 // SPDX-License-Identifier: MIT
10 #include <linux/fault-inject.h>
70 int ret = -ENOMEM; in xe_file_open()
83 xef->drm = file; in xe_file_open()
84 xef->client = client; in xe_file_open()
85 xef->xe = xe; in xe_file_open()
87 mutex_init(&xef->vm.lock); in xe_file_open()
88 xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1); in xe_file_open()
90 mutex_init(&xef->exec_queue.lock); in xe_file_open()
91 xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); in xe_file_open()
93 file->driver_priv = xef; in xe_file_open()
94 kref_init(&xef->refcount); in xe_file_open()
96 task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID); in xe_file_open()
98 xef->process_name = kstrdup(task->comm, GFP_KERNEL); in xe_file_open()
99 xef->pid = task->pid; in xe_file_open()
110 xa_destroy(&xef->exec_queue.xa); in xe_file_destroy()
111 mutex_destroy(&xef->exec_queue.lock); in xe_file_destroy()
112 xa_destroy(&xef->vm.xa); in xe_file_destroy()
113 mutex_destroy(&xef->vm.lock); in xe_file_destroy()
115 xe_drm_client_put(xef->client); in xe_file_destroy()
116 kfree(xef->process_name); in xe_file_destroy()
121 * xe_file_get() - Take a reference to the xe file object
131 kref_get(&xef->refcount); in xe_file_get()
136 * xe_file_put() - Drop a reference to the xe file object
143 kref_put(&xef->refcount, xe_file_destroy); in xe_file_put()
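
The two helpers above are the standard kref pattern: xe_file_destroy() runs only when the last reference drops, which may be well after xe_file_close(). A minimal usage sketch, assuming xe_file_get() returns its argument as kref take-helpers conventionally do (the worker function is hypothetical):

	/* Hypothetical deferred user that must keep the xe_file alive. */
	static void example_deferred_use(struct xe_file *xef)
	{
		xef = xe_file_get(xef);		/* refcount++ via kref_get() */

		/* ... safe to touch xef->vm.xa / xef->exec_queue.xa ... */

		xe_file_put(xef);		/* refcount--; the last put frees
						 * through xe_file_destroy() */
	}
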
149 struct xe_file *xef = file->driver_priv; in xe_file_close()
160 * vm->lock taken during xe_exec_queue_kill(). in xe_file_close()
162 xa_for_each(&xef->exec_queue.xa, idx, q) { in xe_file_close()
163 if (q->vm && q->hwe->hw_engine_group) in xe_file_close()
164 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_file_close()
168 xa_for_each(&xef->vm.xa, idx, vm) in xe_file_close()
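
xa_for_each() visits every populated index without needing a separate list, which is what lets xe_file_close() kill the exec queues before vm->lock is taken, per the ordering comment above. A self-contained sketch of the same idiom (struct and helper names are illustrative, not from the driver):

	/* Illustrative xarray teardown walk. */
	struct example_obj { int id; };

	static void example_teardown(struct xarray *xa)
	{
		struct example_obj *obj;
		unsigned long idx;

		xa_for_each(xa, idx, obj)	/* iterates present entries only */
			kfree(obj);
		xa_destroy(xa);			/* frees the xarray's own nodes,
						 * not the entries */
	}
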
198 struct drm_file *file_priv = file->private_data; in xe_drm_ioctl()
199 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl()
203 return -ECANCELED; in xe_drm_ioctl()
216 struct drm_file *file_priv = file->private_data; in xe_drm_compat_ioctl()
217 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl()
221 return -ECANCELED; in xe_drm_compat_ioctl()
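
Both entry points short-circuit with -ECANCELED once the device is wedged, so no new work can enter a dead device. A hedged reconstruction of the elided wrapper body; the runtime-PM bracketing is an assumption based on the rest of the driver:

	static long xe_drm_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
	{
		struct drm_file *file_priv = file->private_data;
		struct xe_device *xe = to_xe_device(file_priv->minor->dev);
		long ret;

		if (xe_device_wedged(xe))
			return -ECANCELED;	/* refuse work on a wedged device */

		ret = xe_pm_runtime_get_ioctl(xe);	/* assumed PM helper */
		if (ret >= 0)
			ret = drm_ioctl(file, cmd, arg);	/* core dispatch */
		xe_pm_runtime_put(xe);

		return ret;
	}
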
283 if (xe->preempt_fence_wq) in xe_device_destroy()
284 destroy_workqueue(xe->preempt_fence_wq); in xe_device_destroy()
286 if (xe->ordered_wq) in xe_device_destroy()
287 destroy_workqueue(xe->ordered_wq); in xe_device_destroy()
289 if (xe->unordered_wq) in xe_device_destroy()
290 destroy_workqueue(xe->unordered_wq); in xe_device_destroy()
292 if (xe->destroy_wq) in xe_device_destroy()
293 destroy_workqueue(xe->destroy_wq); in xe_device_destroy()
295 ttm_device_fini(&xe->ttm); in xe_device_destroy()
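
xe_device_destroy() is registered with drmm_add_action_or_reset() in xe_device_create() below, so it can run against a half-constructed device; that is why every workqueue is NULL-checked before destroy_workqueue(). The idiom, reduced to its shape:

	/* Illustrative: drmm teardown that tolerates partial construction. */
	static void example_destroy(struct drm_device *dev, void *arg)
	{
		struct example *e = arg;

		if (e->wq)		/* destroy_workqueue(NULL) would oops */
			destroy_workqueue(e->wq);
	}
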
310 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); in xe_device_create()
314 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, in xe_device_create()
315 xe->drm.anon_inode->i_mapping, in xe_device_create()
316 xe->drm.vma_offset_manager, false, false); in xe_device_create()
320 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); in xe_device_create()
324 xe->info.devid = pdev->device; in xe_device_create()
325 xe->info.revid = pdev->revision; in xe_device_create()
326 xe->info.force_execlist = xe_modparam.force_execlist; in xe_device_create()
332 init_waitqueue_head(&xe->ufence_wq); in xe_device_create()
334 init_rwsem(&xe->usm.lock); in xe_device_create()
336 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); in xe_device_create()
343 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, in xe_device_create()
344 XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1), in xe_device_create()
345 &xe->usm.next_asid, GFP_KERNEL); in xe_device_create()
346 drm_WARN_ON(&xe->drm, err); in xe_device_create()
348 xa_erase(&xe->usm.asid_to_vm, asid); in xe_device_create()
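
This block (likely debug-only; the surrounding condition is elided from the listing) allocates one ASID at the very top of the range and immediately frees it, leaving usm.next_asid near the wrap point so an early allocation exercises the wrap path. The API in isolation:

	/* Illustrative cyclic ID allocator: xa_alloc_cyclic() returns 0 on
	 * success, 1 if the range wrapped, negative on error. */
	static int example_alloc_id(struct xarray *xa, u32 *next, void *entry,
				    u32 *out_id)
	{
		int err = xa_alloc_cyclic(xa, out_id, entry, XA_LIMIT(0, 1023),
					  next, GFP_KERNEL);

		return err < 0 ? err : 0;
	}
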
351 spin_lock_init(&xe->pinned.lock); in xe_device_create()
352 INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); in xe_device_create()
353 INIT_LIST_HEAD(&xe->pinned.external_vram); in xe_device_create()
354 INIT_LIST_HEAD(&xe->pinned.evicted); in xe_device_create()
356 xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", in xe_device_create()
358 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); in xe_device_create()
359 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); in xe_device_create()
360 xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); in xe_device_create()
361 if (!xe->ordered_wq || !xe->unordered_wq || in xe_device_create()
362 !xe->preempt_fence_wq || !xe->destroy_wq) { in xe_device_create()
367 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); in xe_device_create()
368 err = -ENOMEM; in xe_device_create()
372 err = drmm_mutex_init(&xe->drm, &xe->pmt.lock); in xe_device_create()
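
Four workqueues are created and then validated with one combined check; partial failure is fine because the drmm-registered xe_device_destroy() above NULL-checks each queue. Reduced to its shape:

	/* Illustrative: an ordered queue serializes its items, an unordered
	 * one may run them concurrently; cleanup is left to the destroy
	 * action, so a single combined check suffices. */
	static int example_wq_init(struct example *e)
	{
		e->ordered = alloc_ordered_workqueue("example-ordered", 0);
		e->unordered = alloc_workqueue("example-unordered", 0, 0);
		if (!e->ordered || !e->unordered)
			return -ENOMEM;	/* destroy action frees the survivor */
		return 0;
	}
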
393 * The driver-initiated FLR is the highest level of reset that we can trigger
396 * it doesn't require a re-enumeration of the PCI BARs. However, the
397 * driver-initiated FLR does still cause a reset of both GT and display and a
399 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
403 * re-init anyway.
407 const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */ in __xe_driver_flr()
411 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); in __xe_driver_flr()
424 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); in __xe_driver_flr()
429 /* Trigger the actual Driver-FLR */ in __xe_driver_flr()
432 /* Wait for hardware teardown to complete */ in __xe_driver_flr()
435 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); in __xe_driver_flr()
439 /* Wait for hardware/firmware re-init to complete */ in __xe_driver_flr()
443 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); in __xe_driver_flr()
454 drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); in xe_driver_flr()
465 if (xe->needs_flr_on_fini) in xe_driver_flr_fini()
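
Each FLR phase is a bounded register poll. Judging from the complete call in xe_device_l2_flush() below, xe_mmio_wait32() takes (mmio, reg, mask, val, timeout_us, out_val, atomic); a sketch of the "wait for a bit to clear" shape used here:

	/* Illustrative: poll until (reg & bit) == 0 or timeout_us expires. */
	static int example_wait_bit_clear(struct xe_mmio *mmio, struct xe_reg reg,
					  u32 bit, u32 timeout_us)
	{
		/* atomic=false: the poll is allowed to sleep between reads */
		return xe_mmio_wait32(mmio, reg, bit, 0, timeout_us, NULL, false);
	}
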
481 unsigned int mask_size = xe->info.dma_mask_size; in xe_set_dma_info()
484 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); in xe_set_dma_info()
486 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
490 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
497 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); in xe_set_dma_info()
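
mask_size comes from per-platform device info, and the streaming and coherent masks are set to the same width. A minimal sketch using the combined helper (the segment size here is illustrative):

	/* Illustrative DMA setup for a device with 'bits' usable address bits. */
	static int example_set_dma_info(struct device *dev, unsigned int bits)
	{
		dma_set_max_seg_size(dev, UINT_MAX);
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bits));
	}
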
521 drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); in wait_for_lmem_ready()
528 return -EINTR; in wait_for_lmem_ready()
541 drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); in wait_for_lmem_ready()
542 return -EPROBE_DEFER; in wait_for_lmem_ready()
549 drm_dbg(&xe->drm, "lmem ready after %ums", in wait_for_lmem_ready()
550 jiffies_to_msecs(jiffies - start)); in wait_for_lmem_ready()
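
The loop polls firmware init status, honors pending signals with -EINTR so probe can be aborted, and returns -EPROBE_DEFER on timeout so the core retries probing later. A generic sketch of such a signal-aware bounded poll (hw_ready() is hypothetical):

	/* Illustrative: bounded, signal-aware readiness poll. */
	static int example_wait_ready(struct xe_device *xe, unsigned long timeout_ms)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			if (hw_ready(xe))		/* hypothetical check */
				return 0;
			if (signal_pending(current))
				return -EINTR;		/* user aborted probe */
			msleep(20);
		} while (time_before(jiffies, timeout));

		return -EPROBE_DEFER;	/* firmware may finish later; retry */
	}
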
560 xe->info.probe_display = 0; in update_device_info()
561 xe->info.has_heci_gscfi = 0; in update_device_info()
562 xe->info.skip_guc_pc = 1; in update_device_info()
563 xe->info.skip_pcode = 1; in update_device_info()
597 xe->wedged.mode = xe_modparam.wedged_mode; in xe_device_probe_early()
609 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe)) in probe_has_flat_ccs()
616 return -ETIMEDOUT; in probe_has_flat_ccs()
619 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); in probe_has_flat_ccs()
621 if (!xe->info.has_flat_ccs) in probe_has_flat_ccs()
622 drm_dbg(&xe->drm, in probe_has_flat_ccs()
643 xe->info.mem_region_mask = 1; in xe_device_probe()
664 * Only after this point can GT-specific MMIO operations in xe_device_probe()
673 xe_guc_comm_init_early(&tile->primary_gt->uc.guc); in xe_device_probe()
674 err = xe_gt_sriov_vf_bootstrap(tile->primary_gt); in xe_device_probe()
677 err = xe_gt_sriov_vf_query_config(tile->primary_gt); in xe_device_probe()
681 err = xe_ggtt_init_early(tile->mem.ggtt); in xe_device_probe()
684 err = xe_memirq_init(&tile->memirq); in xe_device_probe()
698 err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe); in xe_device_probe()
755 err = drm_dev_register(&xe->drm, 0); in xe_device_probe()
772 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); in xe_device_probe()
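
xe_device_probe() leans on managed actions throughout: devm_add_action_or_reset() registers a cleanup to run automatically on driver detach, and runs it immediately if registration itself fails. The idiom in isolation (names illustrative):

	/* Illustrative: managed cleanup registration. */
	static void example_fini(void *arg)
	{
		struct xe_device *xe = arg;

		/* teardown that must run on unbind, e.g. a final FLR */
	}

	static int example_register_fini(struct xe_device *xe)
	{
		/* on failure, example_fini() is invoked right away */
		return devm_add_action_or_reset(xe->drm.dev, example_fini, xe);
	}
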
797 drm_dev_unplug(&xe->drm); in xe_device_remove_display()
825 drm_dbg(&xe->drm, "Shutting down device\n"); in xe_device_shutdown()
843 * xe_device_wmb() - Device specific write memory barrier
848 * Since it doesn't matter which register we write to, use the read-only VF_CAP
859 * xe_device_td_flush() - Flush transient L3 cache entries
870 * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
872 * Media is not coherent with L3 and we want to support render-vs-media
898 xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); in xe_device_td_flush()
906 if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, in xe_device_td_flush()
928 spin_lock(&gt->global_invl_lock); in xe_device_l2_flush()
929 xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); in xe_device_l2_flush()
931 if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) in xe_device_l2_flush()
933 spin_unlock(&gt->global_invl_lock); in xe_device_l2_flush()
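
The trailing "true" selects an atomic (non-sleeping) poll, required because the wait runs under gt->global_invl_lock; hardware clears bit 0 of XE2_GLOBAL_INVAL when the invalidation completes. The same sequence, annotated (the logging call is illustrative):

	spin_lock(&gt->global_invl_lock);		/* serialize invalidations */
	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);	/* kick it off */
	/* mask=0x1, val=0x0: wait for HW to clear the bit within 500us;
	 * atomic=true because we must not sleep while holding the spinlock */
	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
		pr_err_once("xe: global invalidation timed out\n");
	spin_unlock(&gt->global_invl_lock);
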
945 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
965 drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); in xe_device_snapshot_print()
966 drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); in xe_device_snapshot_print()
970 drm_printf(p, "\tTile: %u\n", gt->tile->id); in xe_device_snapshot_print()
972 gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media"); in xe_device_snapshot_print()
974 REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid), in xe_device_snapshot_print()
975 REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid), in xe_device_snapshot_print()
976 REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid)); in xe_device_snapshot_print()
977 drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock); in xe_device_snapshot_print()
983 return sign_extend64(address, xe->info.va_bits - 1); in xe_device_canonicalize_addr()
988 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); in xe_device_uncanonicalize_addr()
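
A worked example, assuming va_bits == 48 (an assumption; the real value is per-platform): canonicalization replicates bit 47 into the upper bits, and uncanonicalization masks them back off:

	#include <linux/bitops.h>	/* sign_extend64() */
	#include <linux/bits.h>		/* GENMASK_ULL() */

	u64 addr  = 0x0000800000000000ULL;		/* bit 47 set */
	u64 canon = sign_extend64(addr, 48 - 1);	/* 0xffff800000000000 */
	u64 flat  = canon & GENMASK_ULL(48 - 1, 0);	/* 0x0000800000000000 */
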
999 * xe_device_declare_wedged - Declare device wedged
1003 * re-probe (unbind + bind).
1017 if (xe->wedged.mode == 0) { in xe_device_declare_wedged()
1018 drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); in xe_device_declare_wedged()
1024 if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { in xe_device_declare_wedged()
1025 		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); in xe_device_declare_wedged()
1029 if (!atomic_xchg(&xe->wedged.flag, 1)) { in xe_device_declare_wedged()
1030 xe->needs_flr_on_fini = true; in xe_device_declare_wedged()
1031 drm_err(&xe->drm, in xe_device_declare_wedged()
1035 dev_name(xe->drm.dev)); in xe_device_declare_wedged()
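
atomic_xchg() returns the previous value, so only the first caller observes 0 and performs the one-time actions (setting needs_flr_on_fini, logging the wedged state). The latch in isolation:

	/* Illustrative once-only latch, same shape as xe->wedged.flag. */
	static atomic_t example_wedged = ATOMIC_INIT(0);

	static void example_declare_wedged(void)
	{
		if (!atomic_xchg(&example_wedged, 1)) {
			/* first caller only: runs exactly once per lifetime */
			pr_err("example: device declared wedged\n");
		}
	}
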