Lines matching the identifier "ct" in xe_guc_ct.c
61 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
63 #define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code) argument
65 #define CT_DEAD(ct, ctb, reason) \ argument
73 /* Used when a CT send wants to block and/or receive data */
104 ct_to_guc(struct xe_guc_ct *ct) in ct_to_guc() argument
106 return container_of(ct, struct xe_guc, ct); in ct_to_guc()
110 ct_to_gt(struct xe_guc_ct *ct) in ct_to_gt() argument
112 return container_of(ct, struct xe_gt, uc.guc.ct); in ct_to_gt()
116 ct_to_xe(struct xe_guc_ct *ct) in ct_to_xe() argument
118 return gt_to_xe(ct_to_gt(ct)); in ct_to_xe()
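
The three helpers above (ct_to_guc(), ct_to_gt(), ct_to_xe()) recover the enclosing objects from an embedded struct xe_guc_ct purely by pointer arithmetic. A minimal, runnable userspace sketch of the same container_of idiom follows; the fake_* structs are stand-ins for illustration, not the real Xe types.

    #include <stdio.h>
    #include <stddef.h>

    /* Same idea as the kernel macro: subtract the member offset. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_ct  { int state; };                   /* stand-in for struct xe_guc_ct */
    struct fake_guc { int id; struct fake_ct ct; };   /* stand-in for struct xe_guc    */

    static struct fake_guc *ct_to_guc(struct fake_ct *ct)
    {
        return container_of(ct, struct fake_guc, ct);
    }

    int main(void)
    {
        struct fake_guc guc = { .id = 7 };
        struct fake_ct *ct = &guc.ct;

        /* Walking back from the embedded member yields the parent object. */
        printf("guc id via ct: %d\n", ct_to_guc(ct)->id);
        return 0;
    }
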
133 * | 0x1000 | H2G CT Buffer (send) | n*4K |
136 * | 0x1000 | G2H CT Buffer (g2h) | m*4K |
140 * Size of each ``CT Buffer`` must be a multiple of 4K.
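
Reading the layout above together with the register helpers further down (guc_ct_ctb_h2g_register() / guc_ct_ctb_g2h_register()), the two descriptors sit back to back at the start of the BO and the buffers start after CTB_DESC_SIZE * 2. A small sketch of that offset arithmetic; CTB_DESC_SIZE and the buffer sizes are assumed example values (2K descriptors are consistent with the buffers starting at 0x1000 after two descriptors), not the driver's actual constants.

    #include <stdio.h>
    #include <assert.h>

    #define SZ_2K 0x800u
    #define SZ_4K 0x1000u

    /* Assumed values for illustration; the driver defines its own sizes. */
    #define CTB_DESC_SIZE        SZ_2K
    #define CTB_H2G_BUFFER_SIZE  (4 * SZ_4K)   /* "n*4K" in the layout above */
    #define CTB_G2H_BUFFER_SIZE  (4 * SZ_4K)   /* "m*4K" in the layout above */

    int main(void)
    {
        unsigned int base = 0;  /* stands in for the GGTT address of ct->bo */

        unsigned int h2g_desc = base;                       /* 0x0000 */
        unsigned int g2h_desc = base + CTB_DESC_SIZE;       /* 0x0800 */
        unsigned int h2g_buf  = base + CTB_DESC_SIZE * 2;   /* 0x1000 */
        unsigned int g2h_buf  = h2g_buf + CTB_H2G_BUFFER_SIZE;

        /* Each CT buffer must be a multiple of 4K. */
        assert(CTB_H2G_BUFFER_SIZE % SZ_4K == 0);
        assert(CTB_G2H_BUFFER_SIZE % SZ_4K == 0);

        printf("h2g desc 0x%04x, g2h desc 0x%04x, h2g buf 0x%04x, g2h buf 0x%04x\n",
               h2g_desc, g2h_desc, h2g_buf, g2h_buf);
        return 0;
    }
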
164 * CT command queue
165 * @ct: the &xe_guc_ct. Unused for now but will be used in the future.
168 * second to process. Use that to calculate maximum time to process a full CT
171 * Return: Maximum time to process a full CT queue in jiffies.
173 long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct) in xe_guc_ct_queue_proc_time_jiffies() argument
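
The partially elided comment above suggests the GuC needs on the order of a second to work through 4K of queued commands, so the worst-case processing time scales as buffer-size / 4K times HZ. A hedged arithmetic sketch of that derivation; the per-4K rate, the buffer size and the HZ value are assumptions for illustration only.

    #include <stdio.h>

    #define SZ_4K 0x1000u
    #define HZ    250u                         /* example CONFIG_HZ value */

    /* Assumed H2G buffer size for illustration ("n*4K" in the layout). */
    #define CTB_H2G_BUFFER_SIZE (4 * SZ_4K)

    /* ~1 second per 4K of queued commands -> whole-queue bound in jiffies. */
    static long queue_proc_time_jiffies(unsigned int buffer_size)
    {
        return (long)(buffer_size / SZ_4K) * HZ;
    }

    int main(void)
    {
        printf("max CT queue processing time: %ld jiffies\n",
               queue_proc_time_jiffies(CTB_H2G_BUFFER_SIZE));
        return 0;
    }
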
187 struct xe_guc_ct *ct = arg; in guc_ct_fini() local
189 destroy_workqueue(ct->g2h_wq); in guc_ct_fini()
190 xa_destroy(&ct->fence_lookup); in guc_ct_fini()
193 static void receive_g2h(struct xe_guc_ct *ct);
197 static void primelockdep(struct xe_guc_ct *ct) in primelockdep() argument
203 might_lock(&ct->lock); in primelockdep()
207 int xe_guc_ct_init(struct xe_guc_ct *ct) in xe_guc_ct_init() argument
209 struct xe_device *xe = ct_to_xe(ct); in xe_guc_ct_init()
210 struct xe_gt *gt = ct_to_gt(ct); in xe_guc_ct_init()
217 ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM); in xe_guc_ct_init()
218 if (!ct->g2h_wq) in xe_guc_ct_init()
221 spin_lock_init(&ct->fast_lock); in xe_guc_ct_init()
222 xa_init(&ct->fence_lookup); in xe_guc_ct_init()
223 INIT_WORK(&ct->g2h_worker, g2h_worker_func); in xe_guc_ct_init()
224 INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func); in xe_guc_ct_init()
226 spin_lock_init(&ct->dead.lock); in xe_guc_ct_init()
227 INIT_WORK(&ct->dead.worker, ct_dead_worker_func); in xe_guc_ct_init()
229 init_waitqueue_head(&ct->wq); in xe_guc_ct_init()
230 init_waitqueue_head(&ct->g2h_fence_wq); in xe_guc_ct_init()
232 err = drmm_mutex_init(&xe->drm, &ct->lock); in xe_guc_ct_init()
236 primelockdep(ct); in xe_guc_ct_init()
245 ct->bo = bo; in xe_guc_ct_init()
247 err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct); in xe_guc_ct_init()
251 xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED); in xe_guc_ct_init()
252 ct->state = XE_GUC_CT_STATE_DISABLED; in xe_guc_ct_init()
302 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct) in guc_ct_ctb_h2g_register() argument
304 struct xe_guc *guc = ct_to_guc(ct); in guc_ct_ctb_h2g_register()
308 desc_addr = xe_bo_ggtt_addr(ct->bo); in guc_ct_ctb_h2g_register()
309 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2; in guc_ct_ctb_h2g_register()
310 size = ct->ctbs.h2g.info.size * sizeof(u32); in guc_ct_ctb_h2g_register()
329 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct) in guc_ct_ctb_g2h_register() argument
331 struct xe_guc *guc = ct_to_guc(ct); in guc_ct_ctb_g2h_register()
335 desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE; in guc_ct_ctb_g2h_register()
336 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 + in guc_ct_ctb_g2h_register()
338 size = ct->ctbs.g2h.info.size * sizeof(u32); in guc_ct_ctb_g2h_register()
357 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable) in guc_ct_control_toggle() argument
368 int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request)); in guc_ct_control_toggle()
373 static void xe_guc_ct_set_state(struct xe_guc_ct *ct, in xe_guc_ct_set_state() argument
376 mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */ in xe_guc_ct_set_state()
377 spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */ in xe_guc_ct_set_state()
379 xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 || in xe_guc_ct_set_state()
382 if (ct->g2h_outstanding) in xe_guc_ct_set_state()
383 xe_pm_runtime_put(ct_to_xe(ct)); in xe_guc_ct_set_state()
384 ct->g2h_outstanding = 0; in xe_guc_ct_set_state()
385 ct->state = state; in xe_guc_ct_set_state()
387 spin_unlock_irq(&ct->fast_lock); in xe_guc_ct_set_state()
391 * needs to be serialized with the send path, which the ct lock provides. in xe_guc_ct_set_state()
393 xa_destroy(&ct->fence_lookup); in xe_guc_ct_set_state()
395 mutex_unlock(&ct->lock); in xe_guc_ct_set_state()
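
xe_guc_ct_set_state() shows the CT locking hierarchy: ct->lock (a mutex serialising the send path and dequeue_one_g2h()) is always taken before ct->fast_lock (a spinlock serialising the interrupt fast path), while the fast path itself only ever takes the spinlock. A userspace pthread sketch of that nesting discipline (link with -pthread); the variable names are stand-ins, not the real CT fields.

    #include <stdio.h>
    #include <pthread.h>

    /* Stand-ins for ct->lock (outer) and ct->fast_lock (inner). */
    static pthread_mutex_t ct_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_spinlock_t fast_lock;

    static int state;
    static int g2h_outstanding;

    /* Mirrors xe_guc_ct_set_state(): outer mutex first, inner spinlock second. */
    static void set_state(int new_state)
    {
        pthread_mutex_lock(&ct_lock);       /* serialise vs. send/dequeue  */
        pthread_spin_lock(&fast_lock);      /* serialise vs. the fast path */

        g2h_outstanding = 0;
        state = new_state;

        pthread_spin_unlock(&fast_lock);
        /* fence-lookup teardown would happen here, still under ct_lock */
        pthread_mutex_unlock(&ct_lock);
    }

    /* The fast path only ever takes the inner lock, never the mutex. */
    static void fast_path(void)
    {
        pthread_spin_lock(&fast_lock);
        g2h_outstanding++;
        pthread_spin_unlock(&fast_lock);
    }

    int main(void)
    {
        pthread_spin_init(&fast_lock, PTHREAD_PROCESS_PRIVATE);
        fast_path();
        set_state(1);
        printf("state=%d outstanding=%d\n", state, g2h_outstanding);
        pthread_spin_destroy(&fast_lock);
        return 0;
    }
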
398 static bool ct_needs_safe_mode(struct xe_guc_ct *ct) in ct_needs_safe_mode() argument
400 return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev)); in ct_needs_safe_mode()
403 static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct) in ct_restart_safe_mode_worker() argument
405 if (!ct_needs_safe_mode(ct)) in ct_restart_safe_mode_worker()
408 queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10); in ct_restart_safe_mode_worker()
414 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work); in safe_mode_worker_func() local
416 receive_g2h(ct); in safe_mode_worker_func()
418 if (!ct_restart_safe_mode_worker(ct)) in safe_mode_worker_func()
419 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n"); in safe_mode_worker_func()
422 static void ct_enter_safe_mode(struct xe_guc_ct *ct) in ct_enter_safe_mode() argument
424 if (ct_restart_safe_mode_worker(ct)) in ct_enter_safe_mode()
425 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n"); in ct_enter_safe_mode()
428 static void ct_exit_safe_mode(struct xe_guc_ct *ct) in ct_exit_safe_mode() argument
430 if (cancel_delayed_work_sync(&ct->safe_mode_worker)) in ct_exit_safe_mode()
431 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n"); in ct_exit_safe_mode()
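
Safe mode covers the case where MSI interrupts are unavailable (ct_needs_safe_mode()): instead of being driven by the G2H interrupt, receive_g2h() is polled from a self-rearming delayed work item every HZ/10. A rough userspace analogue follows, with a plain loop and a 100 ms sleep standing in for the delayed workqueue; the bounded poll count exists only to keep the demo finite.

    #include <stdio.h>
    #include <time.h>
    #include <stdbool.h>

    static bool msi_enabled;        /* assume no MSI -> stay in safe mode */
    static int polls_left = 3;      /* bounded for the demo */

    static void receive_g2h(void)
    {
        printf("polling G2H queue\n");
        polls_left--;
    }

    static bool needs_safe_mode(void)
    {
        return !msi_enabled && polls_left > 0;
    }

    int main(void)
    {
        /* Mirrors safe_mode_worker_func(): poll, then re-arm if still needed. */
        while (needs_safe_mode()) {
            struct timespec period = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

            receive_g2h();
            nanosleep(&period, NULL);   /* stand-in for the HZ/10 delayed work */
        }
        printf("safe-mode polling stopped\n");
        return 0;
    }
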
434 int xe_guc_ct_enable(struct xe_guc_ct *ct) in xe_guc_ct_enable() argument
436 struct xe_device *xe = ct_to_xe(ct); in xe_guc_ct_enable()
437 struct xe_gt *gt = ct_to_gt(ct); in xe_guc_ct_enable()
440 xe_gt_assert(gt, !xe_guc_ct_enabled(ct)); in xe_guc_ct_enable()
442 xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size); in xe_guc_ct_enable()
443 guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); in xe_guc_ct_enable()
444 guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); in xe_guc_ct_enable()
446 err = guc_ct_ctb_h2g_register(ct); in xe_guc_ct_enable()
450 err = guc_ct_ctb_g2h_register(ct); in xe_guc_ct_enable()
454 err = guc_ct_control_toggle(ct, true); in xe_guc_ct_enable()
458 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED); in xe_guc_ct_enable()
461 wake_up_all(&ct->wq); in xe_guc_ct_enable()
462 xe_gt_dbg(gt, "GuC CT communication channel enabled\n"); in xe_guc_ct_enable()
464 if (ct_needs_safe_mode(ct)) in xe_guc_ct_enable()
465 ct_enter_safe_mode(ct); in xe_guc_ct_enable()
469 * The CT has now been reset so the dumper can be re-armed in xe_guc_ct_enable()
472 spin_lock_irq(&ct->dead.lock); in xe_guc_ct_enable()
473 if (ct->dead.reason) { in xe_guc_ct_enable()
474 ct->dead.reason |= (1 << CT_DEAD_STATE_REARM); in xe_guc_ct_enable()
475 queue_work(system_unbound_wq, &ct->dead.worker); in xe_guc_ct_enable()
477 spin_unlock_irq(&ct->dead.lock); in xe_guc_ct_enable()
483 xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err)); in xe_guc_ct_enable()
484 CT_DEAD(ct, NULL, SETUP); in xe_guc_ct_enable()
489 static void stop_g2h_handler(struct xe_guc_ct *ct) in stop_g2h_handler() argument
491 cancel_work_sync(&ct->g2h_worker); in stop_g2h_handler()
496 * @ct: the &xe_guc_ct
498 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
501 void xe_guc_ct_disable(struct xe_guc_ct *ct) in xe_guc_ct_disable() argument
503 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED); in xe_guc_ct_disable()
504 ct_exit_safe_mode(ct); in xe_guc_ct_disable()
505 stop_g2h_handler(ct); in xe_guc_ct_disable()
510 * @ct: the &xe_guc_ct
512 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
514 void xe_guc_ct_stop(struct xe_guc_ct *ct) in xe_guc_ct_stop() argument
516 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED); in xe_guc_ct_stop()
517 stop_g2h_handler(ct); in xe_guc_ct_stop()
520 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len) in h2g_has_room() argument
522 struct guc_ctb *h2g = &ct->ctbs.h2g; in h2g_has_room()
524 lockdep_assert_held(&ct->lock); in h2g_has_room()
527 h2g->info.head = desc_read(ct_to_xe(ct), h2g, head); in h2g_has_room()
530 struct xe_device *xe = ct_to_xe(ct); in h2g_has_room()
535 xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n", in h2g_has_room()
537 CT_DEAD(ct, h2g, H2G_HAS_ROOM); in h2g_has_room()
551 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len) in g2h_has_room() argument
556 lockdep_assert_held(&ct->fast_lock); in g2h_has_room()
558 return ct->ctbs.g2h.info.space > g2h_len; in g2h_has_room()
561 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len) in has_room() argument
563 lockdep_assert_held(&ct->lock); in has_room()
565 if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len)) in has_room()
571 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len) in h2g_reserve_space() argument
573 lockdep_assert_held(&ct->lock); in h2g_reserve_space()
574 ct->ctbs.h2g.info.space -= cmd_len; in h2g_reserve_space()
577 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) in __g2h_reserve_space() argument
579 xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space); in __g2h_reserve_space()
580 xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) || in __g2h_reserve_space()
584 lockdep_assert_held(&ct->fast_lock); in __g2h_reserve_space()
586 if (!ct->g2h_outstanding) in __g2h_reserve_space()
587 xe_pm_runtime_get_noresume(ct_to_xe(ct)); in __g2h_reserve_space()
589 ct->ctbs.g2h.info.space -= g2h_len; in __g2h_reserve_space()
590 ct->g2h_outstanding += num_g2h; in __g2h_reserve_space()
594 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) in __g2h_release_space() argument
598 lockdep_assert_held(&ct->fast_lock); in __g2h_release_space()
600 bad = ct->ctbs.g2h.info.space + g2h_len > in __g2h_release_space()
601 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space; in __g2h_release_space()
602 bad |= !ct->g2h_outstanding; in __g2h_release_space()
605 …xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n", in __g2h_release_space()
606 ct->ctbs.g2h.info.space, g2h_len, in __g2h_release_space()
607 ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space, in __g2h_release_space()
608 ct->ctbs.g2h.info.space + g2h_len, in __g2h_release_space()
609 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space, in __g2h_release_space()
610 ct->g2h_outstanding); in __g2h_release_space()
611 CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE); in __g2h_release_space()
615 ct->ctbs.g2h.info.space += g2h_len; in __g2h_release_space()
616 if (!--ct->g2h_outstanding) in __g2h_release_space()
617 xe_pm_runtime_put(ct_to_xe(ct)); in __g2h_release_space()
620 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) in g2h_release_space() argument
622 spin_lock_irq(&ct->fast_lock); in g2h_release_space()
623 __g2h_release_space(ct, g2h_len); in g2h_release_space()
624 spin_unlock_irq(&ct->fast_lock); in g2h_release_space()
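
The reserve/release pair above is credit-based flow control for the G2H buffer: each send that expects replies consumes g2h_len dwords of space and bumps g2h_outstanding, and a runtime-PM reference is held while any reply is outstanding (taken on the 0 -> 1 transition in __g2h_reserve_space(), dropped when __g2h_release_space() brings the count back to 0). A self-contained sketch of that accounting with stubbed PM calls and an example buffer size:

    #include <stdio.h>
    #include <assert.h>

    #define G2H_SIZE 256u   /* dwords of G2H space, example value */

    static unsigned int space = G2H_SIZE;
    static unsigned int outstanding;

    static void pm_get(void) { printf("runtime PM ref taken\n"); }
    static void pm_put(void) { printf("runtime PM ref dropped\n"); }

    /* Mirrors __g2h_reserve_space(): consume credits, hold PM while busy. */
    static void g2h_reserve(unsigned int len, unsigned int num_replies)
    {
        assert(len <= space);
        if (!outstanding)
            pm_get();
        space -= len;
        outstanding += num_replies;
    }

    /* Mirrors __g2h_release_space(): return credits, drop PM when idle. */
    static void g2h_release(unsigned int len)
    {
        assert(outstanding);
        assert(space + len <= G2H_SIZE);
        space += len;
        if (!--outstanding)
            pm_put();
    }

    int main(void)
    {
        g2h_reserve(8, 1);  /* first send expecting a reply -> PM ref taken */
        g2h_reserve(8, 1);  /* second send; PM ref already held             */
        g2h_release(8);
        g2h_release(8);     /* last reply processed -> PM ref dropped       */
        printf("space=%u outstanding=%u\n", space, outstanding);
        return 0;
    }
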
629 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, in h2g_write() argument
632 struct xe_device *xe = ct_to_xe(ct); in h2g_write()
633 struct xe_gt *gt = ct_to_gt(ct); in h2g_write()
634 struct guc_ctb *h2g = &ct->ctbs.h2g; in h2g_write()
644 lockdep_assert_held(&ct->lock); in h2g_write()
649 xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status); in h2g_write()
659 xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail); in h2g_write()
665 xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n", in h2g_write()
672 xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n", in h2g_write()
682 h2g_reserve_space(ct, (h2g->info.size - tail)); in h2g_write()
690 * dw0: CT header (including fence) in h2g_write()
720 h2g_reserve_space(ct, full_len); in h2g_write()
731 CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE); in h2g_write()
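
h2g_has_room() and h2g_write() treat the H2G buffer as a ring whose read index (head) is advanced by the GuC through a descriptor shared over the BO, so the head is re-read and range-checked before free space is computed with wraparound. A minimal sketch of that check, assuming a power-of-two ring size and the usual one-slot-empty convention; the names and constants are illustrative, not the driver's.

    #include <stdio.h>
    #include <stdbool.h>

    #define RING_SIZE 64u   /* dwords; must be a power of two for the mask trick */

    /*
     * Free dwords between the driver's write index ("tail") and the GuC's
     * read index ("head"), leaving one slot empty so full != empty.
     */
    static unsigned int free_space(unsigned int tail, unsigned int head)
    {
        return (head - tail - 1) & (RING_SIZE - 1);
    }

    /* The head comes from a descriptor shared with the firmware: validate it. */
    static bool has_room(unsigned int tail, unsigned int head, unsigned int cmd_len)
    {
        if (head >= RING_SIZE) {
            printf("invalid head offset %u >= %u\n", head, RING_SIZE);
            return false;   /* the driver marks the channel broken here */
        }
        return free_space(tail, head) >= cmd_len;
    }

    int main(void)
    {
        printf("room for 8 dwords at tail=60 head=10: %d\n", has_room(60, 10, 8));
        printf("room for 16 dwords at tail=4 head=10: %d\n", has_room(4, 10, 16));
        return 0;
    }
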
736 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
744 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence) in next_ct_seqno() argument
746 u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK; in next_ct_seqno()
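
next_ct_seqno() hands out the 16-bit fence carried in the CT header: a wrapping counter masked to the fence width, with tracked fences (those registered in ct->fence_lookup) distinguished from untracked ones. A hedged sketch of such an allocator; the mask value and the "untracked" flag bit are assumed encodings for illustration, not the driver's exact constants.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Assumed encoding: low 15 bits are the counter, the top bit marks untracked. */
    #define CT_SEQNO_MASK      0x7fff
    #define CT_SEQNO_UNTRACKED 0x8000

    static uint32_t fence_seqno;

    static uint16_t next_ct_seqno(bool is_g2h_fence)
    {
        uint16_t seqno = fence_seqno++ & CT_SEQNO_MASK;

        /* Only fences waited on via fence_lookup use the tracked range. */
        if (!is_g2h_fence)
            seqno |= CT_SEQNO_UNTRACKED;

        return seqno;
    }

    int main(void)
    {
        printf("tracked:   0x%04x\n", next_ct_seqno(true));
        printf("untracked: 0x%04x\n", next_ct_seqno(false));

        fence_seqno = CT_SEQNO_MASK + 1;    /* counter wraps back to 0 under the mask */
        printf("wrapped:   0x%04x\n", next_ct_seqno(true));
        return 0;
    }
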
754 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, in __guc_ct_send_locked() argument
758 struct xe_gt *gt __maybe_unused = ct_to_gt(ct); in __guc_ct_send_locked()
762 xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); in __guc_ct_send_locked()
767 lockdep_assert_held(&ct->lock); in __guc_ct_send_locked()
769 if (unlikely(ct->ctbs.h2g.info.broken)) { in __guc_ct_send_locked()
774 if (ct->state == XE_GUC_CT_STATE_DISABLED) { in __guc_ct_send_locked()
779 if (ct->state == XE_GUC_CT_STATE_STOPPED) { in __guc_ct_send_locked()
784 xe_gt_assert(gt, xe_guc_ct_enabled(ct)); in __guc_ct_send_locked()
791 g2h_fence->seqno = next_ct_seqno(ct, true); in __guc_ct_send_locked()
792 ret = xa_err(xa_store(&ct->fence_lookup, in __guc_ct_send_locked()
801 seqno = next_ct_seqno(ct, false); in __guc_ct_send_locked()
805 spin_lock_irq(&ct->fast_lock); in __guc_ct_send_locked()
807 ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len); in __guc_ct_send_locked()
811 ret = h2g_write(ct, action, len, seqno, !!g2h_fence); in __guc_ct_send_locked()
818 __g2h_reserve_space(ct, g2h_len, num_g2h); in __guc_ct_send_locked()
819 xe_guc_notify(ct_to_guc(ct)); in __guc_ct_send_locked()
822 spin_unlock_irq(&ct->fast_lock); in __guc_ct_send_locked()
827 static void kick_reset(struct xe_guc_ct *ct) in kick_reset() argument
829 xe_gt_reset_async(ct_to_gt(ct)); in kick_reset()
832 static int dequeue_one_g2h(struct xe_guc_ct *ct);
834 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, in guc_ct_send_locked() argument
838 struct xe_device *xe = ct_to_xe(ct); in guc_ct_send_locked()
839 struct xe_gt *gt = ct_to_gt(ct); in guc_ct_send_locked()
844 lockdep_assert_held(&ct->lock); in guc_ct_send_locked()
845 xe_device_assert_mem_access(ct_to_xe(ct)); in guc_ct_send_locked()
848 ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, in guc_ct_send_locked()
859 !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) { in guc_ct_send_locked()
860 struct guc_ctb *h2g = &ct->ctbs.h2g; in guc_ct_send_locked()
874 struct xe_device *xe = ct_to_xe(ct); in guc_ct_send_locked()
875 struct guc_ctb *g2h = &ct->ctbs.g2h; in guc_ct_send_locked()
885 #define g2h_avail(ct) \ in guc_ct_send_locked() argument
886 (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head) in guc_ct_send_locked()
887 if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding || in guc_ct_send_locked()
888 g2h_avail(ct), HZ)) in guc_ct_send_locked()
892 ret = dequeue_one_g2h(ct); in guc_ct_send_locked()
895 xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)", in guc_ct_send_locked()
907 CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK); in guc_ct_send_locked()
912 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len, in guc_ct_send() argument
917 xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence); in guc_ct_send()
919 mutex_lock(&ct->lock); in guc_ct_send()
920 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence); in guc_ct_send()
921 mutex_unlock(&ct->lock); in guc_ct_send()
926 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len, in xe_guc_ct_send() argument
931 ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL); in xe_guc_ct_send()
933 kick_reset(ct); in xe_guc_ct_send()
938 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, in xe_guc_ct_send_locked() argument
943 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL); in xe_guc_ct_send_locked()
945 kick_reset(ct); in xe_guc_ct_send_locked()
950 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len) in xe_guc_ct_send_g2h_handler() argument
954 lockdep_assert_held(&ct->lock); in xe_guc_ct_send_g2h_handler()
956 ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL); in xe_guc_ct_send_g2h_handler()
958 kick_reset(ct); in xe_guc_ct_send_g2h_handler()
965 * CT back up. Randomly picking 5 seconds for an upper limit to do a GT reset.
967 static bool retry_failure(struct xe_guc_ct *ct, int ret) in retry_failure() argument
972 #define ct_alive(ct) \ in retry_failure() argument
973 (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \ in retry_failure()
974 !ct->ctbs.g2h.info.broken) in retry_failure()
975 if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) in retry_failure()
982 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, in guc_ct_send_recv() argument
985 struct xe_gt *gt = ct_to_gt(ct); in guc_ct_send_recv()
1000 ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence); in guc_ct_send_recv()
1003 ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno, in guc_ct_send_recv()
1011 kick_reset(ct); in guc_ct_send_recv()
1013 if (no_fail && retry_failure(ct, ret)) in guc_ct_send_recv()
1017 xa_erase(&ct->fence_lookup, g2h_fence.seqno); in guc_ct_send_recv()
1022 ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ); in guc_ct_send_recv()
1024 LNL_FLUSH_WORK(&ct->g2h_worker); in guc_ct_send_recv()
1038 mutex_lock(&ct->lock); in guc_ct_send_recv()
1042 xa_erase(&ct->fence_lookup, g2h_fence.seqno); in guc_ct_send_recv()
1043 mutex_unlock(&ct->lock); in guc_ct_send_recv()
1050 mutex_unlock(&ct->lock); in guc_ct_send_recv()
1062 mutex_unlock(&ct->lock); in guc_ct_send_recv()
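
guc_ct_send_recv() is the blocking request/response path: the send registers a g2h_fence in ct->fence_lookup under its seqno, waits on ct->g2h_fence_wq until parse_g2h_response() erases the entry, fills in the result and wakes it, and on timeout erases the entry itself under ct->lock so a late reply cannot touch a stale fence. A single-threaded userspace sketch of just the matching logic, with a plain array standing in for the xarray and no real blocking:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_FENCES 8

    struct g2h_fence {
        unsigned int seqno;
        unsigned int response_data;
        bool done;
    };

    /* Stand-in for ct->fence_lookup (an xarray keyed by seqno in the driver). */
    static struct g2h_fence *fence_lookup[MAX_FENCES];

    static void send_request(struct g2h_fence *f, unsigned int seqno)
    {
        f->seqno = seqno;
        f->done = false;
        fence_lookup[seqno % MAX_FENCES] = f;   /* registered before the H2G write */
    }

    /* Mirrors parse_g2h_response(): look up by fence, complete, unregister. */
    static void handle_response(unsigned int seqno, unsigned int data)
    {
        struct g2h_fence *f = fence_lookup[seqno % MAX_FENCES];

        if (!f || f->seqno != seqno) {
            printf("unexpected response for fence %u\n", seqno);
            return;
        }
        fence_lookup[seqno % MAX_FENCES] = NULL;
        f->response_data = data;
        f->done = true;     /* the driver then wakes ct->g2h_fence_wq */
    }

    int main(void)
    {
        struct g2h_fence fence;

        send_request(&fence, 3);
        handle_response(3, 0xabcd);     /* as if the G2H worker parsed a reply */

        if (fence.done)
            printf("fence 3 completed, data 0x%x\n", fence.response_data);
        return 0;
    }
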
1069 * @ct: the &xe_guc_ct
1074 * Send a `HXG Request`_ message to the GuC over CT communication channel and
1085 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, in xe_guc_ct_send_recv() argument
1088 KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer); in xe_guc_ct_send_recv()
1089 return guc_ct_send_recv(ct, action, len, response_buffer, false); in xe_guc_ct_send_recv()
1092 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action, in xe_guc_ct_send_recv_no_fail() argument
1095 return guc_ct_send_recv(ct, action, len, response_buffer, true); in xe_guc_ct_send_recv_no_fail()
1108 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len) in parse_g2h_event() argument
1113 lockdep_assert_held(&ct->lock); in parse_g2h_event()
1120 g2h_release_space(ct, len); in parse_g2h_event()
1126 static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action) in guc_crash_process_msg() argument
1128 struct xe_gt *gt = ct_to_gt(ct); in guc_crash_process_msg()
1137 CT_DEAD(ct, NULL, CRASH); in guc_crash_process_msg()
1139 kick_reset(ct); in guc_crash_process_msg()
1144 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) in parse_g2h_response() argument
1146 struct xe_gt *gt = ct_to_gt(ct); in parse_g2h_response()
1153 lockdep_assert_held(&ct->lock); in parse_g2h_response()
1156 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup. in parse_g2h_response()
1172 CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE); in parse_g2h_response()
1177 g2h_fence = xa_erase(&ct->fence_lookup, fence); in parse_g2h_response()
1180 /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */ in parse_g2h_response()
1182 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); in parse_g2h_response()
1202 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); in parse_g2h_response()
1207 wake_up_all(&ct->g2h_fence_wq); in parse_g2h_response()
1212 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) in parse_g2h_msg() argument
1214 struct xe_gt *gt = ct_to_gt(ct); in parse_g2h_msg()
1219 lockdep_assert_held(&ct->lock); in parse_g2h_msg()
1225 CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN); in parse_g2h_msg()
1233 ret = parse_g2h_event(ct, msg, len); in parse_g2h_msg()
1238 ret = parse_g2h_response(ct, msg, len); in parse_g2h_msg()
1243 CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE); in parse_g2h_msg()
1251 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) in process_g2h_msg() argument
1253 struct xe_guc *guc = ct_to_guc(ct); in process_g2h_msg()
1254 struct xe_gt *gt = ct_to_gt(ct); in process_g2h_msg()
1320 ret = guc_crash_process_msg(ct, action); in process_g2h_msg()
1329 CT_DEAD(ct, NULL, PROCESS_FAILED); in process_g2h_msg()
1335 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) in g2h_read() argument
1337 struct xe_device *xe = ct_to_xe(ct); in g2h_read()
1338 struct xe_gt *gt = ct_to_gt(ct); in g2h_read()
1339 struct guc_ctb *g2h = &ct->ctbs.g2h; in g2h_read()
1345 xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); in g2h_read()
1346 lockdep_assert_held(&ct->fast_lock); in g2h_read()
1348 if (ct->state == XE_GUC_CT_STATE_DISABLED) in g2h_read()
1351 if (ct->state == XE_GUC_CT_STATE_STOPPED) in g2h_read()
1357 xe_gt_assert(gt, xe_guc_ct_enabled(ct)); in g2h_read()
1367 xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n"); in g2h_read()
1372 xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status); in g2h_read()
1385 * possible because it is guarded by ct->fast_lock. And yet, some in g2h_read()
1395 xe_gt_err(gt, "CT read: head was modified %u != %u\n", in g2h_read()
1403 xe_gt_err(gt, "CT read: head out of range: %u vs %u\n", in g2h_read()
1410 xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n", in g2h_read()
1474 trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id, in g2h_read()
1480 CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ); in g2h_read()
1484 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) in g2h_fast_path() argument
1486 struct xe_gt *gt = ct_to_gt(ct); in g2h_fast_path()
1487 struct xe_guc *guc = ct_to_guc(ct); in g2h_fast_path()
1500 __g2h_release_space(ct, len); in g2h_fast_path()
1511 CT_DEAD(ct, NULL, FAST_G2H); in g2h_fast_path()
1517 * @ct: GuC CT object
1523 void xe_guc_ct_fast_path(struct xe_guc_ct *ct) in xe_guc_ct_fast_path() argument
1525 struct xe_device *xe = ct_to_xe(ct); in xe_guc_ct_fast_path()
1529 ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct)); in xe_guc_ct_fast_path()
1530 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL) in xe_guc_ct_fast_path()
1533 spin_lock(&ct->fast_lock); in xe_guc_ct_fast_path()
1535 len = g2h_read(ct, ct->fast_msg, true); in xe_guc_ct_fast_path()
1537 g2h_fast_path(ct, ct->fast_msg, len); in xe_guc_ct_fast_path()
1539 spin_unlock(&ct->fast_lock); in xe_guc_ct_fast_path()
1546 static int dequeue_one_g2h(struct xe_guc_ct *ct) in dequeue_one_g2h() argument
1551 lockdep_assert_held(&ct->lock); in dequeue_one_g2h()
1553 spin_lock_irq(&ct->fast_lock); in dequeue_one_g2h()
1554 len = g2h_read(ct, ct->msg, false); in dequeue_one_g2h()
1555 spin_unlock_irq(&ct->fast_lock); in dequeue_one_g2h()
1559 ret = parse_g2h_msg(ct, ct->msg, len); in dequeue_one_g2h()
1563 ret = process_g2h_msg(ct, ct->msg, len); in dequeue_one_g2h()
1570 static void receive_g2h(struct xe_guc_ct *ct) in receive_g2h() argument
1576 * Normal users must always hold mem_access.ref around CT calls. However in receive_g2h()
1577 * during the runtime pm callbacks we rely on CT to talk to the GuC, but in receive_g2h()
1584 * the device has suspended to the point that the CT communication has in receive_g2h()
1588 * still issuing CT requests (since that requires having the in receive_g2h()
1594 * we need to be careful with blocking the pm callbacks from getting CT in receive_g2h()
1598 ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct)); in receive_g2h()
1599 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL) in receive_g2h()
1603 mutex_lock(&ct->lock); in receive_g2h()
1604 ret = dequeue_one_g2h(ct); in receive_g2h()
1605 mutex_unlock(&ct->lock); in receive_g2h()
1608 xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret); in receive_g2h()
1609 CT_DEAD(ct, NULL, G2H_RECV); in receive_g2h()
1610 kick_reset(ct); in receive_g2h()
1615 xe_pm_runtime_put(ct_to_xe(ct)); in receive_g2h()
1620 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker); in g2h_worker_func() local
1622 receive_g2h(ct); in g2h_worker_func()
1625 static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic, in guc_ct_snapshot_alloc() argument
1634 if (ct->bo && want_ctb) { in guc_ct_snapshot_alloc()
1635 snapshot->ctb_size = ct->bo->size; in guc_ct_snapshot_alloc()
1664 static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic, in guc_ct_snapshot_capture() argument
1667 struct xe_device *xe = ct_to_xe(ct); in guc_ct_snapshot_capture()
1670 snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb); in guc_ct_snapshot_capture()
1672 xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n"); in guc_ct_snapshot_capture()
1676 if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) { in guc_ct_snapshot_capture()
1678 snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding); in guc_ct_snapshot_capture()
1679 guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g); in guc_ct_snapshot_capture()
1680 guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h); in guc_ct_snapshot_capture()
1683 if (ct->bo && snapshot->ctb) in guc_ct_snapshot_capture()
1684 xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size); in guc_ct_snapshot_capture()
1690 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
1691 * @ct: GuC CT object.
1696 * Returns: a GuC CT snapshot object that must be freed by the caller
1699 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct) in xe_guc_ct_snapshot_capture() argument
1701 return guc_ct_snapshot_capture(ct, true, true); in xe_guc_ct_snapshot_capture()
1705 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
1706 * @snapshot: GuC CT snapshot object.
1709 * This function prints out a given GuC CT snapshot object.
1732 drm_puts(p, "CT disabled\n"); in xe_guc_ct_snapshot_print()
1738 * @snapshot: GuC CT snapshot object.
1753 * xe_guc_ct_print - GuC CT Print.
1754 * @ct: GuC CT.
1758 * This function will quickly capture a snapshot of the CT state
1761 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb) in xe_guc_ct_print() argument
1765 snapshot = guc_ct_snapshot_capture(ct, false, want_ctb); in xe_guc_ct_print()
1771 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code) in ct_dead_capture() argument
1775 struct xe_guc *guc = ct_to_guc(ct); in ct_dead_capture()
1783 if (ct->dead.reported) in ct_dead_capture()
1786 spin_lock_irqsave(&ct->dead.lock, flags); in ct_dead_capture()
1789 have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE); in ct_dead_capture()
1790 ct->dead.reason |= (1 << reason_code) | in ct_dead_capture()
1793 spin_unlock_irqrestore(&ct->dead.lock, flags); in ct_dead_capture()
1799 snapshot_ct = xe_guc_ct_snapshot_capture((ct)); in ct_dead_capture()
1801 spin_lock_irqsave(&ct->dead.lock, flags); in ct_dead_capture()
1803 if (ct->dead.snapshot_log || ct->dead.snapshot_ct) { in ct_dead_capture()
1804 xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n"); in ct_dead_capture()
1808 ct->dead.snapshot_log = snapshot_log; in ct_dead_capture()
1809 ct->dead.snapshot_ct = snapshot_ct; in ct_dead_capture()
1812 spin_unlock_irqrestore(&ct->dead.lock, flags); in ct_dead_capture()
1814 queue_work(system_unbound_wq, &(ct)->dead.worker); in ct_dead_capture()
1819 struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead); in ct_dead_print() local
1820 struct xe_device *xe = ct_to_xe(ct); in ct_dead_print()
1821 struct xe_gt *gt = ct_to_gt(ct); in ct_dead_print()
1843 drm_puts(&lp, "**** GuC CT ****\n"); in ct_dead_print()
1851 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker); in ct_dead_worker_func() local
1853 if (!ct->dead.reported) { in ct_dead_worker_func()
1854 ct->dead.reported = true; in ct_dead_worker_func()
1855 ct_dead_print(&ct->dead); in ct_dead_worker_func()
1858 spin_lock_irq(&ct->dead.lock); in ct_dead_worker_func()
1860 xe_guc_log_snapshot_free(ct->dead.snapshot_log); in ct_dead_worker_func()
1861 ct->dead.snapshot_log = NULL; in ct_dead_worker_func()
1862 xe_guc_ct_snapshot_free(ct->dead.snapshot_ct); in ct_dead_worker_func()
1863 ct->dead.snapshot_ct = NULL; in ct_dead_worker_func()
1865 if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) { in ct_dead_worker_func()
1867 ct->dead.reason = 0; in ct_dead_worker_func()
1868 ct->dead.reported = false; in ct_dead_worker_func()
1871 spin_unlock_irq(&ct->dead.lock); in ct_dead_worker_func()
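
The CT_DEAD machinery follows a capture-once, report-from-worker pattern: the first caller latches its reason bit plus a CAPTURE flag under ct->dead.lock, the expensive log and CT snapshots are taken outside the spinlock, a worker prints everything exactly once (ct->dead.reported), and the state only re-arms when xe_guc_ct_enable() sets CT_DEAD_STATE_REARM. A compact single-threaded sketch of that state machine, with printf standing in for the real snapshot capture and reason names that are examples only:

    #include <stdio.h>
    #include <stdbool.h>

    enum { DEAD_REARM, DEAD_CAPTURE, DEAD_SETUP, DEAD_DEADLOCK };  /* example reasons */

    static unsigned int dead_reason;    /* bitmask of reasons seen so far */
    static bool dead_reported;

    /* Mirrors ct_dead_capture(): latch the reason, capture only once. */
    static void dead_capture(unsigned int reason_code)
    {
        bool have_capture;

        if (dead_reported)
            return;

        have_capture = dead_reason & (1u << DEAD_CAPTURE);
        dead_reason |= (1u << reason_code) | (1u << DEAD_CAPTURE);

        if (!have_capture)
            printf("capturing log + CT snapshots\n");    /* done outside the lock */
    }

    /* Mirrors ct_dead_worker_func(): report once, re-arm only on REARM. */
    static void dead_worker(void)
    {
        if (!dead_reported) {
            dead_reported = true;
            printf("dead CT report, reasons 0x%x\n", dead_reason);
        }
        if (dead_reason & (1u << DEAD_REARM)) {
            dead_reason = 0;
            dead_reported = false;
            printf("dumper re-armed\n");
        }
    }

    int main(void)
    {
        dead_capture(DEAD_DEADLOCK);
        dead_capture(DEAD_SETUP);           /* second reason: no second snapshot */
        dead_worker();

        dead_reason |= 1u << DEAD_REARM;    /* set when the CT is re-enabled */
        dead_worker();
        return 0;
    }
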