// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	destroy_workqueue(gt->ordered_wq);
}

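/*
 * Allocate a GT for @tile and set up its ordered workqueue; cleanup is
 * registered as a drmm action so it runs automatically on driver teardown.
 */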
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;
	int err;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
						 WQ_MEM_RECLAIM);

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return ERR_PTR(err);

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, on TGL the driver will not
	 * reload
	 */
	gt->uc.guc.submission_state.enabled = false;
}

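/*
 * Wa_16023588340: enable host-side L2 caching of VRAM. No-op on platforms
 * where the workaround does not apply; undone by xe_gt_disable_host_l2_vram().
 */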
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
	unsigned int fw_ref;
	u32 reg;

	if (!XE_WA(gt, 16023588340))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	if (!xe_gt_is_media_type(gt)) {
		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= CG_DIS_CNTLBUS;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
	}

	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
	unsigned int fw_ref;
	u32 reg;

	if (!XE_WA(gt, 16023588340))
		return;

	if (xe_gt_is_media_type(gt))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
	reg &= ~CG_DIS_CNTLBUS;
	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

/**
 * xe_gt_remove() - Clean up the GT structures before driver removal
 * @gt: the GT object
 *
 * This function should only act on objects/structures that must be cleaned
 * before the driver removal callback is complete and therefore can't be
 * deferred to a drmm action.
 */
void xe_gt_remove(struct xe_gt *gt)
{
	int i;

	xe_uc_remove(&gt->uc);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	xe_gt_disable_host_l2_vram(gt);
}

static void gt_reset_worker(struct work_struct *w);

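/* Submit an empty batch on @q and wait up to one second for it to complete. */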
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

/*
 * Convert back from encoded value to type-safe, only to be used when reg.mcr
 * is true
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

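/*
 * Build and submit a batch that re-emits the LRC save/restore (workaround)
 * register values for @q's engine, then wait up to one second for it to
 * complete.
 */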
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/* Big enough to emit all of the context's 3DSTATE */
		bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
	else
		/* Just pick a large BB size */
		bb = xe_bb_new(gt, SZ_4K, false);

	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, idx, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
			u32 val;

			/*
			 * Skip reading the register if it's not really needed
			 */
			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits + 1)
				val = (reg.mcr ?
				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
				       xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
			else
				val = 0;

			val |= entry->set_bits;

			bb->cs[bb->len++] = reg.addr;
			bb->cs[bb->len++] = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	xe_lrc_emit_hwe_state_instructions(q, bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

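/*
 * Record a default ("golden") LRC image per engine class: run a workaround
 * batch on a kernel queue, context-switch away and back via nop jobs, then
 * copy the resulting LRC state for use as the default context image.
 */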
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

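/*
 * Early GT initialization: SR-IOV PF state, workaround/OOB tables, forcewake
 * and TLB invalidation bookkeeping. Runs before the GT is otherwise usable.
 */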
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_oob(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	spin_lock_init(&gt->global_invl_lock);

	err = xe_gt_tlb_invalidation_init_early(gt);
	if (err)
		return err;

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

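/*
 * Initialization steps that only need the GT forcewake domain: GGTT/LMTT
 * setup on the primary GT, per-engine IRQs, early HW engine and sysfs init,
 * and stashing the hardware-reported GMD_ID version.
 */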
static int gt_fw_domain_init(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err, i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref) {
		err = -ETIMEDOUT;
		goto err_hw_fence_irq;
	}

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have the hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/*
	 * Stash hardware-reported version. Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;

err_force_wake:
	dump_pat_on_error(gt);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

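/*
 * Initialization steps that need all forcewake domains: GT workarounds and
 * tuning, clocks, MOCS, HW engines, GuC/HuC post-hwconfig init, USM and
 * migration setup on the primary GT, CCS mode and SR-IOV PF hardware init.
 */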
static int all_fw_domain_init(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err, i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_force_wake;
	}

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_wa_process_gt(gt);
	xe_tuning_process_gt(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it doesn't block behind user
		 * operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		xe_gt_sriov_pf_init(gt);
		xe_gt_sriov_pf_init_hw(gt);
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

/*
 * Initialize enough of the GT to be able to load GuC in order to obtain
 * hwconfig and enable CTB communication.
 */
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);

	err = xe_uc_init(&gt->uc);
	if (err)
		goto out_fw;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto out_fw;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);
	xe_gt_enable_host_l2_vram(gt);

out_fw:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return err;
}

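/*
 * Full GT initialization, run after hwconfig is available: reset worker,
 * fence IRQs, pagefault handling, sysfs, forcewake domains, idle/freq
 * support and the user-visible engine list.
 */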
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	return 0;
}

/**
 * xe_gt_mmio_init() - Initialize GT's MMIO access
 * @gt: the GT object
 *
 * Initialize GT's MMIO accessor, which will be used to access registers inside
 * this GT.
 */
void xe_gt_mmio_init(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);

	gt->mmio.regs = tile->mmio.regs;
	gt->mmio.regs_size = tile->mmio.regs_size;
	gt->mmio.tile = tile;

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
	}

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		gt->mmio.sriov_vf_gt = gt;
}

void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

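/*
 * Trigger a full GT reset through GDRST and wait for the hardware to clear
 * the reset bit; on VFs the reset is requested via xe_gt_sriov_vf_reset()
 * instead.
 */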
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_reset(gt);

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

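/*
 * Re-initialize GT hardware state after a reset or resume: PAT, MCR defaults,
 * saved register values, WOPCM, ring enables, GuC/HuC reload, MOCS, CCS mode
 * and frequencies. VFs take the reduced vf_gt_restart() path.
 */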
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_enable_host_l2_vram(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	/* Restore GT freq to expected values */
	xe_gt_sanitize_freq(gt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);

	return 0;
}

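/*
 * Full reset flow: quiesce GuC submission and pagefault handling, reset the
 * hardware, then restart it via do_gt_restart(). On failure the device is
 * declared wedged.
 */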
static int gt_reset(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		return -ECANCELED;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	xe_pm_runtime_get(gt_to_xe(gt));

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_out;
	}

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	xe_uc_stop(&gt->uc);

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	XE_WARN_ON(xe_uc_start(&gt->uc));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	xe_device_declare_wedged(gt_to_xe(gt));
	xe_pm_runtime_put(gt_to_xe(gt));

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	xe_uc_stop_prepare(&gt->uc);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

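/*
 * Suspend the GT for system/runtime PM: sanitize software state, suspend the
 * GuC/HuC, disable power gating and host L2 VRAM caching.
 */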
int xe_gt_suspend(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_disable_pg(gt);

	xe_gt_disable_host_l2_vram(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_dbg(gt, "suspended\n");

	return 0;

err_msg:
	err = -ETIMEDOUT;
err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

void xe_gt_shutdown(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	do_gt_reset(gt);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
	int ret = 0;

	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
	    XE_WA(gt, 22019338487))
		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

	return ret;
}

int xe_gt_resume(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	xe_gt_dbg(gt, "resuming\n");
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_gt_idle_enable_pg(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_dbg(gt, "resumed\n");

	return 0;

err_msg:
	err = -ETIMEDOUT;
err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

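/*
 * Look up a HW engine on @gt by class and either its physical or logical
 * instance number; returns NULL if no such engine exists.
 */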
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

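/*
 * Return any engine that shares a reset domain with @class: render and
 * compute are treated as one domain, every other class matches only itself.
 */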
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT, which stops all submission, saves desired debug state, and
 * cleans up anything which could time out.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

	xe_uc_declare_wedged(&gt->uc);
	xe_gt_tlb_invalidation_reset(gt);
}