// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_klvs_abi.h"
#include "abi/guc_relay_actions_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"

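/* Build a 64-bit value from two 32-bit halves (hi goes into the upper dword). */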
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

static int guc_action_vf_reset(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET),
	};
	int ret;

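	/*
	 * VF2GUC_VF_RESET carries no payload and expects no data back;
	 * a positive return (unexpected response data) is treated as a
	 * protocol error.
	 */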
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int vf_reset_guc_state(struct xe_gt *gt)
{
	struct xe_guc *guc = &gt->uc.guc;
	int err;

	err = guc_action_vf_reset(guc);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_vf_reset - Reset GuC VF internal state.
 * @gt: the &xe_gt
 *
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_reset(struct xe_gt *gt)
{
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	return vf_reset_guc_state(gt);
}

static int guc_action_match_version(struct xe_guc *guc,
				    u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
				    u32 *branch, u32 *major, u32 *minor, u32 *patch)
{
	u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_MATCH_VERSION),
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	int ret;

	BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN);

	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
	*major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
	*patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);

	return 0;
}

static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	struct xe_device *xe = gt_to_xe(gt);

	switch (xe->info.platform) {
	case XE_TIGERLAKE ... XE_PVC:
		/* 1.1 is the current baseline for the Xe driver */
		*branch = 0;
		*major = 1;
		*minor = 1;
		break;
	default:
		/* 1.2 has support for the GMD_ID KLV */
		*branch = 0;
		*major = 1;
		*minor = 2;
		break;
	}
}

static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	/* for now it's the same as minimum */
	return vf_minimum_guc_version(gt, branch, major, minor);
}

static int vf_handshake_with_guc(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_guc *guc = &gt->uc.guc;
	u32 wanted_branch, wanted_major, wanted_minor;
	u32 branch, major, minor, patch;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	/* select wanted version - prefer previous (if any) */
	if (guc_version->major || guc_version->minor) {
		wanted_branch = guc_version->branch;
		wanted_major = guc_version->major;
		wanted_minor = guc_version->minor;
	} else {
		vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
		xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
	}

	err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
				       &branch, &major, &minor, &patch);
	if (unlikely(err))
		goto fail;

	/* we don't support interface version change */
	if ((guc_version->major || guc_version->minor) &&
	    (guc_version->branch != branch || guc_version->major != major ||
	     guc_version->minor != minor)) {
		xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
				branch, major, minor, patch);
		xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
				 guc_version->branch, guc_version->major,
				 guc_version->minor, guc_version->patch);
		err = -EREMCHG;
		goto fail;
	}

	/* a major version higher than requested is a protocol violation */
	if (major > wanted_major) {
		err = -EPROTO;
		goto unsupported;
	}

	/* there's no fallback on major version. */
	if (major != wanted_major) {
		err = -ENOPKG;
		goto unsupported;
	}

	/* check against minimum version supported by us */
	vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
	xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
	if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
		err = -ENOKEY;
		goto unsupported;
	}

	xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
			branch, major, minor, patch);

	guc_version->branch = branch;
	guc_version->major = major;
	guc_version->minor = minor;
	guc_version->patch = patch;
	return 0;

unsupported:
	xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
			branch, major, minor, patch, ERR_PTR(err));
fail:
	xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
			wanted_major, wanted_minor, ERR_PTR(err));

	/* try again with *any* just to query which version is supported */
	if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
				      GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
				      &branch, &major, &minor, &patch))
		xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
				   branch, major, minor, patch);
	return err;
}

/**
 * xe_gt_sriov_vf_bootstrap - Query and set up GuC ABI interface version.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
	int err;

	err = vf_reset_guc_state(gt);
	if (unlikely(err))
		return err;

	err = vf_handshake_with_guc(gt);
	if (unlikely(err))
		return err;

	return 0;
}

static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
	};
	int ret;

	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

/**
 * xe_gt_sriov_vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
 * @gt: the &xe_gt struct instance linked to target GuC
 *
 * Returns: 0 if the operation completed successfully, or a negative error
 * code otherwise.
 */
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
{
	struct xe_guc *guc = &gt->uc.guc;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_vf_notify_resfix_done(guc);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n",
				ERR_PTR(err));
	else
		xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n");

	return err;
}

static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
				       u32 *value, u32 value_len)
{
	u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV),
		FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	u32 length;
	int ret;

	BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN);
	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]);
	if (unlikely(length > value_len))
		return -EOVERFLOW;
	if (unlikely(length < value_len))
		return -ENODATA;

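	/*
	 * The KLV value is spread over up to three consecutive response
	 * dwords; copy only as many dwords as the caller asked for.
	 */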
	switch (value_len) {
	default:
		xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3);
		fallthrough;
	case 3:
		value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]);
		fallthrough;
	case 2:
		value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]);
		fallthrough;
	case 1:
		value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]);
		fallthrough;
	case 0:
		break;
	}

	return 0;
}

static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32)
{
	return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32));
}

static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64)
{
	u32 value[2];
	int err;

	err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value));
	if (unlikely(err))
		return err;

	*value64 = make_u64_from_u32(value[1], value[0]);
	return 0;
}

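/* GMD_ID based IP version reporting is available from graphics version 12.70 onwards. */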
static bool has_gmdid(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1270;
}

/**
 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: value of GMDID KLV on success or 0 on failure.
 */
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
{
	const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics";
	struct xe_guc *guc = &gt->uc.guc;
	u32 value;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2);

	err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value);
	if (unlikely(err)) {
		xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n",
				type, ERR_PTR(err));
		return 0;
	}

	xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value);
	return value;
}

static int vf_get_ggtt_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u64 start, size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

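	/*
	 * If a GGTT size was already cached (e.g. when re-querying the config
	 * after VF migration), the PF is not expected to have changed it.
	 */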
	if (config->ggtt_size && config->ggtt_size != size) {
		xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
				size / SZ_1K, config->ggtt_size / SZ_1K);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
				start, start + size - 1, size / SZ_1K);

	config->ggtt_base = start;
	config->ggtt_size = size;

	return config->ggtt_size ? 0 : -ENODATA;
}

static int vf_get_lmem_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	char size_str[10];
	u64 size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

	if (config->lmem_size && config->lmem_size != size) {
		xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
				size / SZ_1M, config->lmem_size / SZ_1M);
		return -EREMCHG;
	}

	string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
	xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);

	config->lmem_size = size;

	return config->lmem_size ? 0 : -ENODATA;
}

static int vf_get_submission_cfg(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u32 num_ctxs, num_dbs;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs);
	if (unlikely(err))
		return err;

	if (config->num_ctxs && config->num_ctxs != num_ctxs) {
		xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n",
				num_ctxs, config->num_ctxs);
		return -EREMCHG;
	}
	if (config->num_dbs && config->num_dbs != num_dbs) {
		xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n",
				num_dbs, config->num_dbs);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs);

	config->num_ctxs = num_ctxs;
	config->num_dbs = num_dbs;

	return config->num_ctxs ? 0 : -ENODATA;
}

static void vf_cache_gmdid(struct xe_gt *gt)
{
	xe_gt_assert(gt, has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt);
}

/**
 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	err = vf_get_ggtt_info(gt);
	if (unlikely(err))
		return err;

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		err = vf_get_lmem_info(gt);
		if (unlikely(err))
			return err;
	}

	err = vf_get_submission_cfg(gt);
	if (unlikely(err))
		return err;

	if (has_gmdid(xe))
		vf_cache_gmdid(gt);

	return 0;
}

/**
 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: number of GuC context IDs assigned to VF.
 */
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs);

	return gt->sriov.vf.self_config.num_ctxs;
}

/**
 * xe_gt_sriov_vf_lmem - VF LMEM configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the LMEM assigned to VF.
 */
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);

	return gt->sriov.vf.self_config.lmem_size;
}

static struct xe_ggtt_node *
vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
{
	struct xe_ggtt_node *node;
	int err;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return node;

	err = xe_ggtt_node_insert_balloon(node, start, end);
	if (err) {
		xe_ggtt_node_fini(node);
		return ERR_PTR(err);
	}

	return node;
}

static int vf_balloon_ggtt(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	struct xe_device *xe = gt_to_xe(gt);
	u64 start, end;

	xe_gt_assert(gt, IS_SRIOV_VF(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!config->ggtt_size)
		return -ENODATA;

	/*
	 * VF can only use part of the GGTT as allocated by the PF:
	 *
	 *      WOPCM                                  GUC_GGTT_TOP
	 *      |<------------ Total GGTT size ------------------>|
	 *
	 *           VF GGTT base -->|<- size ->|
	 *
	 *      +--------------------+----------+-----------------+
	 *      |////////////////////|   block  |\\\\\\\\\\\\\\\\\|
	 *      +--------------------+----------+-----------------+
	 *
	 *      |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
	 */

	start = xe_wopcm_size(xe);
	end = config->ggtt_base;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
	}

	start = config->ggtt_base + config->ggtt_size;
	end = GUC_GGTT_TOP;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
			xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
		}
	}

	return 0;
}

static void deballoon_ggtt(struct drm_device *drm, void *arg)
{
	struct xe_tile *tile = arg;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
}

/**
 * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	int err;

	if (xe_gt_is_media_type(gt))
		return 0;

	err = vf_balloon_ggtt(gt);
	if (err)
		return err;

	return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
}

static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
	u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE),
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) |
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor),
	};
	u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN];
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN))
		return -EPROTO;

	if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]);

	return 0;
}

static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.pf_version.major = major;
	gt->sriov.vf.pf_version.minor = minor;
}

static void vf_disconnect_pf(struct xe_gt *gt)
{
	vf_connect_pf(gt, 0, 0);
}

static int vf_handshake_with_pf(struct xe_gt *gt)
{
	u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
	u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
	u32 major = major_wanted, minor = minor_wanted;
	int err;

	err = relay_action_handshake(gt, &major, &minor);
	if (unlikely(err))
		goto failed;

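	/* treat a 0.0 reply as the PF not exposing any VF/PF ABI at all */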
	if (!major && !minor) {
		err = -ENODATA;
		goto failed;
	}

	xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
	vf_connect_pf(gt, major, minor);
	return 0;

failed:
	xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
			major, minor, ERR_PTR(err));
	vf_disconnect_pf(gt);
	return err;
}

/**
 * xe_gt_sriov_vf_connect - Establish connection with the PF driver.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_connect(struct xe_gt *gt)
{
	int err;

	err = vf_handshake_with_pf(gt);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
 *   or just mark that a GuC is ready for it.
 * @gt: the &xe_gt struct instance linked to target GuC
 *
 * This function shall be called only by VF.
 */
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
	/*
	 * We need to be certain that if all flags were set, at least one
	 * thread will notice that and schedule the recovery.
	 */
	smp_mb__after_atomic();

	xe_gt_sriov_info(gt, "ready for recovery after migration\n");
	xe_sriov_vf_start_migration_recovery(xe);
}

static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	return major == gt->sriov.vf.pf_version.major &&
	       minor <= gt->sriov.vf.pf_version.minor;
}

static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
{
	struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs;
	unsigned int regs_size = round_up(num_regs, 4);
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

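	/*
	 * Reuse the previously allocated buffer if it is already big enough;
	 * regs_size is rounded up to a multiple of 4 entries, presumably so
	 * a slightly larger follow-up query can still reuse the allocation.
	 */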
	if (regs) {
		if (num_regs <= gt->sriov.vf.runtime.regs_size) {
			memset(regs, 0, num_regs * sizeof(*regs));
			gt->sriov.vf.runtime.num_regs = num_regs;
			return 0;
		}

		drmm_kfree(&xe->drm, regs);
		gt->sriov.vf.runtime.regs = NULL;
		gt->sriov.vf.runtime.num_regs = 0;
		gt->sriov.vf.runtime.regs_size = 0;
	}

	regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL);
	if (unlikely(!regs))
		return -ENOMEM;

	gt->sriov.vf.runtime.regs = regs;
	gt->sriov.vf.runtime.num_regs = num_regs;
	gt->sriov.vf.runtime.regs_size = regs_size;
	return 0;
}

static int vf_query_runtime_info(struct xe_gt *gt)
{
	u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN];
	u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */
	u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	u32 count, remaining, num, i;
	u32 start = 0;
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, limit);

	/* this is part of the 1.0 PF/VF ABI */
	if (!vf_is_negotiated(gt, 1, 0))
		return -ENOPKG;

	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
				GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) |
		     FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit);

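	/*
	 * The PF returns at most 'limit' (offset, value) pairs per relay
	 * message, so keep re-sending the query with an increasing 'start'
	 * index until the PF reports nothing remaining.
	 */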
repeat:
	request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start);
	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		goto failed;

	if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) {
		ret = -EPROTO;
		goto failed;
	}
	if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) {
		ret = -EPROTO;
		goto failed;
	}

	num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]);
	remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]);

	xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n",
				count, num, ret, start, remaining);

	if (unlikely(count != num)) {
		ret = -EPROTO;
		goto failed;
	}

	if (start == 0) {
		ret = vf_prepare_runtime_info(gt, num + remaining);
		if (unlikely(ret < 0))
			goto failed;
	} else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) {
		ret = -EPROTO;
		goto failed;
	}

	for (i = 0; i < num; ++i) {
		struct vf_runtime_reg *reg = &gt->sriov.vf.runtime.regs[start + i];

		reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i];
		reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1];
	}

	if (remaining) {
		start += num;
		goto repeat;
	}

	return 0;

failed:
	vf_prepare_runtime_info(gt, 0);
	return ret;
}

static void vf_show_runtime_info(struct xe_gt *gt)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n",
				vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt)
{
	int err;

	err = vf_query_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		vf_show_runtime_info(gt);

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n",
			ERR_PTR(err));
	return err;
}

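/*
 * The runtime register table is expected to be sorted by ascending offset
 * (as provided by the PF), which allows the bsearch() lookup below.
 */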
static int vf_runtime_reg_cmp(const void *a, const void *b)
{
	const struct vf_runtime_reg *ra = a;
	const struct vf_runtime_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
{
	struct xe_gt_sriov_vf_runtime *runtime = &gt->sriov.vf.runtime;
	struct vf_runtime_reg key = { .offset = addr };

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
		       vf_runtime_reg_cmp);
}

/**
 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data.
 * @gt: the &xe_gt
 * @reg: the register to read
 *
 * This function is for VF use only.
 * This function shall be called after VF has connected to PF.
 * This function is dedicated for registers that VFs can't read directly.
 *
 * Return: register value obtained from the PF or 0 if not found.
 */
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
	struct vf_runtime_reg *rr;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
	xe_gt_assert(gt, !reg.vf);

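	/*
	 * GMD_ID is queried and cached separately during config query, as it
	 * may be needed before the runtime register table is available.
	 */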
	if (reg.addr == GMD_ID.addr) {
		xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n",
					addr, gt->sriov.vf.runtime.gmdid);
		return gt->sriov.vf.runtime.gmdid;
	}

	rr = vf_lookup_reg(gt, addr);
	if (!rr) {
		xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
			   "VF is trying to read an inaccessible register %#x+%#x\n",
			   reg.addr, addr - reg.addr);
		return 0;
	}

	xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value);
	return rr->value;
}

/**
 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
 * @gt: the &xe_gt
 * @reg: the register to write
 * @val: value to write
 *
 * This function is for VF use only.
 * Currently it will trigger a WARN when running on a debug build.
 */
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !reg.vf);

	/*
	 * In the future, we may want to handle selected writes to inaccessible
	 * registers in some custom way, but for now let's just log a warning
	 * about such attempt, as likely we might be doing something wrong.
	 */
	xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
		   "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
		   val, reg.addr, addr - reg.addr);
}

/**
 * xe_gt_sriov_vf_print_config - Print VF self config.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_device *xe = gt_to_xe(gt);
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
		   config->ggtt_base,
		   config->ggtt_base + config->ggtt_size - 1);

	string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
	}

	drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
	drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs);
}

/**
 * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_print_version - Print VF ABI versions.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
	u32 branch, major, minor;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GuC ABI:\n");

	vf_minimum_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);

	vf_wanted_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);

	drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
		   guc_version->branch, guc_version->major,
		   guc_version->minor, guc_version->patch);

	drm_printf(p, "PF ABI:\n");

	drm_printf(p, "\tbase:\t%u.%u\n",
		   GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR);
	drm_printf(p, "\twanted:\t%u.%u\n",
		   GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR);
	drm_printf(p, "\thandshake:\t%u.%u\n",
		   pf_version->major, pf_version->minor);
}