// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include "abi/guc_actions_sriov_abi.h"

#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_klv_helpers.h"
#include "xe_pm.h"

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size)
{
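	/*
	 * PF2GUC_UPDATE_VGT_POLICY is sent as a blocking H2G request: the
	 * action code is followed by the GGTT address of the KLV buffer
	 * (lower/upper 32 bits) and its size in dwords.
	 */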
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

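	/*
	 * Stage the KLV stream in a buffer the GuC can reach through the
	 * GGTT; on discrete GPUs the buffer is placed in VRAM.
	 */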
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vgt_policy(guc, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_policy_klvs(gt, klvs, num_dwords);

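	/*
	 * The GuC reports how many KLVs were parsed and saved: fewer than
	 * requested means some keys were rejected (-ENOKEY); any other
	 * mismatch is treated as a malformed reply (-EPROTO).
	 */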
	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	return 0;
}

static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
{
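	/* a single KLV: one header dword (key + length of 1) followed by the value */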
	u32 klv[] = {
		PREP_GUC_KLV(key, 1),
		value,
	};

	return pf_push_policy_klvs(gt, 1, klv, ARRAY_SIZE(klv));
}

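/*
 * Note: the cached policy value is updated only after the GuC has accepted
 * the new setting; on failure the previous value is left unchanged.
 */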
static int pf_update_policy_bool(struct xe_gt *gt, u16 key, bool *policy, bool value)
{
	int err;

	err = pf_push_policy_u32(gt, key, value);
	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to '%s' (%pe)\n",
				   key, xe_guc_klv_key_to_string(key),
				   str_enabled_disabled(value), ERR_PTR(err));
		return err;
	}

	xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to '%s'\n",
			key, xe_guc_klv_key_to_string(key),
			str_enabled_disabled(value));

	*policy = value;
	return 0;
}

static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 value)
{
	int err;

	err = pf_push_policy_u32(gt, key, value);
	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to %u (%pe)\n",
				   key, xe_guc_klv_key_to_string(key),
				   value, ERR_PTR(err));
		return err;
	}

	xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to %u\n",
			key, xe_guc_klv_key_to_string(key), value);

	*policy = value;
	return 0;
}

static void pf_bulk_reset_sched_priority(struct xe_gt *gt, u32 priority)
{
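	/* the config array covers the PF's own entry (index 0) as well as all VFs */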
	unsigned int total_vfs = 1 + xe_gt_sriov_pf_get_totalvfs(gt);
	unsigned int n;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 0; n < total_vfs; n++)
		gt->sriov.pf.vfs[n].config.sched_priority = priority;
}

static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	err = pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
				    &gt->sriov.pf.policy.guc.sched_if_idle,
				    enable);

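	/*
	 * On success, reset every function's cached scheduling priority so it
	 * stays consistent with the new policy.
	 */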
	if (!err)
		pf_bulk_reset_sched_priority(gt, enable ? GUC_SCHED_PRIORITY_NORMAL :
					     GUC_SCHED_PRIORITY_LOW);
	return err;
}

static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return pf_provision_sched_if_idle(gt, gt->sriov.pf.policy.guc.sched_if_idle);
}

static void pf_sanitize_sched_if_idle(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	gt->sriov.pf.policy.guc.sched_if_idle = false;
}

/**
 * xe_gt_sriov_pf_policy_set_sched_if_idle - Control the 'sched_if_idle' policy.
 * @gt: the &xe_gt where to apply the policy
 * @enable: the value of the 'sched_if_idle' policy
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_set_sched_if_idle(struct xe_gt *gt, bool enable)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_sched_if_idle(gt, enable);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}

/**
 * xe_gt_sriov_pf_policy_get_sched_if_idle - Retrieve value of 'sched_if_idle' policy.
 * @gt: the &xe_gt where to read the policy from
 *
 * This function can only be called on PF.
 *
 * Return: value of 'sched_if_idle' policy.
 */
bool xe_gt_sriov_pf_policy_get_sched_if_idle(struct xe_gt *gt)
{
	bool enable;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	enable = gt->sriov.pf.policy.guc.sched_if_idle;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return enable;
}

static int pf_provision_reset_engine(struct xe_gt *gt, bool enable)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY,
				     &gt->sriov.pf.policy.guc.reset_engine, enable);
}

static int pf_reprovision_reset_engine(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return pf_provision_reset_engine(gt, gt->sriov.pf.policy.guc.reset_engine);
}

static void pf_sanitize_reset_engine(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	gt->sriov.pf.policy.guc.reset_engine = false;
}

/**
 * xe_gt_sriov_pf_policy_set_reset_engine - Control the 'reset_engine' policy.
 * @gt: the &xe_gt where to apply the policy
 * @enable: the value of the 'reset_engine' policy
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_set_reset_engine(struct xe_gt *gt, bool enable)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_reset_engine(gt, enable);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}

/**
 * xe_gt_sriov_pf_policy_get_reset_engine - Retrieve value of 'reset_engine' policy.
 * @gt: the &xe_gt where to read the policy from
 *
 * This function can only be called on PF.
 *
 * Return: value of 'reset_engine' policy.
 */
bool xe_gt_sriov_pf_policy_get_reset_engine(struct xe_gt *gt)
{
	bool enable;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	enable = gt->sriov.pf.policy.guc.reset_engine;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return enable;
}

static int pf_provision_sample_period(struct xe_gt *gt, u32 value)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return pf_update_policy_u32(gt, GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY,
				    &gt->sriov.pf.policy.guc.sample_period, value);
}

static int pf_reprovision_sample_period(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return pf_provision_sample_period(gt, gt->sriov.pf.policy.guc.sample_period);
}

static void pf_sanitize_sample_period(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	gt->sriov.pf.policy.guc.sample_period = 0;
}

/**
 * xe_gt_sriov_pf_policy_set_sample_period - Control the 'sample_period' policy.
 * @gt: the &xe_gt where to apply the policy
 * @value: the value of the 'sample_period' policy
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_set_sample_period(struct xe_gt *gt, u32 value)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_sample_period(gt, value);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}

/**
 * xe_gt_sriov_pf_policy_get_sample_period - Retrieve value of 'sample_period' policy.
 * @gt: the &xe_gt where to read the policy from
 *
 * This function can only be called on PF.
 *
 * Return: value of 'sample_period' policy.
 */
u32 xe_gt_sriov_pf_policy_get_sample_period(struct xe_gt *gt)
{
	u32 value;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	value = gt->sriov.pf.policy.guc.sample_period;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return value;
}

static void pf_sanitize_guc_policies(struct xe_gt *gt)
{
	pf_sanitize_sched_if_idle(gt);
	pf_sanitize_reset_engine(gt);
	pf_sanitize_sample_period(gt);
}

/**
 * xe_gt_sriov_pf_policy_sanitize - Reset policy settings.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_policy_sanitize(struct xe_gt *gt)
{
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	pf_sanitize_guc_policies(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
}

/**
 * xe_gt_sriov_pf_policy_reprovision - Reprovision (and optionally reset) policy settings.
 * @gt: the &xe_gt
 * @reset: if true, reprovision using default values instead of the latest ones
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
{
	int err = 0;

	xe_pm_runtime_get_noresume(gt_to_xe(gt));

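	/*
	 * Try to push all policies even if one of them fails; any failure is
	 * collapsed into a single -ENXIO return value.
	 */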
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (reset)
		pf_sanitize_guc_policies(gt);
	err |= pf_reprovision_sched_if_idle(gt);
	err |= pf_reprovision_reset_engine(gt);
	err |= pf_reprovision_sample_period(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	xe_pm_runtime_put(gt_to_xe(gt));

	return err ? -ENXIO : 0;
}

static void print_guc_policies(struct drm_printer *p, struct xe_gt_sriov_guc_policies *policy)
{
	drm_printf(p, "%s:\t%s\n",
		   xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY),
		   str_enabled_disabled(policy->sched_if_idle));
	drm_printf(p, "%s:\t%s\n",
		   xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY),
		   str_enabled_disabled(policy->reset_engine));
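	/* a sample period of 0 means adverse event sampling is disabled */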
	drm_printf(p, "%s:\t%u %s\n",
		   xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY),
		   policy->sample_period, policy->sample_period ? "ms" : "(disabled)");
}

/**
 * xe_gt_sriov_pf_policy_print - Dump actual policy values.
 * @gt: the &xe_gt where to read the policy from
 * @p: the &drm_printer
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_print(struct xe_gt *gt, struct drm_printer *p)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	print_guc_policies(p, &gt->sriov.pf.policy.guc);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return 0;
}