/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

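/*
 * A process' doorbell slice must hold one 8-byte doorbell for each of the
 * queues the process may own, rounded up to a whole page.
 */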
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

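/*
 * Kernel doorbells are handed out from a small bitmap. SDMA queues start the
 * search at the SDMA engine 0 doorbell offset; all other queue types search
 * from slot 0. The index returned to the caller is the absolute dword offset
 * on the doorbell BAR, two dwords per 64-bit doorbell.
 */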
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

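/*
 * amdgpu_mes_init() performs the one-time software setup: the pasid, gang
 * and queue IDRs with their locks, the VMID and HQD masks handed to the
 * scheduler firmware, one writeback slot per pipe for the scheduler context
 * and the query-status fence, the doorbell bitmap and, optionally, the
 * event log buffer.
 */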
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec))
			break;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
					 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) query_status_fence_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
					      adev->mes.query_status_fence_offs[i]);
	}

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
					      adev->mes.query_status_fence_offs[i]);
	}

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

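/*
 * A MES process mirrors one amdgpu_vm and is looked up by pasid. Its process
 * context BO lives in GTT and is allocated before the MES lock is taken, so
 * no other lock is ever acquired underneath it.
 */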
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

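/*
 * A gang is a group of queues the firmware schedules as one unit. Adding a
 * gang allocates its context BO, links it to the owning process and assigns
 * it an id from the gang IDR.
 */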
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs");

	return r;
}

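/*
 * Allocate and CPU-map the MQD (memory queue descriptor) for a queue. On
 * success the BO is intentionally left reserved; amdgpu_mes_queue_init_mqd()
 * drops the reservation once the MQD contents have been written.
 */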
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

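/*
 * Adding a hardware queue: allocate the MQD, then, under the MES lock, take
 * a queue id and a doorbell, initialize the MQD and hand the queue to the
 * scheduler firmware through the add_hw_queue callback. The error labels
 * unwind these steps in reverse order.
 */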
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

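/*
 * Removal mirrors amdgpu_mes_add_hw_queue(): drop the queue id, ask the
 * firmware to remove the hardware queue, then release the doorbell and
 * the MQD.
 */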
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_reset_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* look the mes queue up in the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
			  queue_id);

	amdgpu_mes_unlock(&adev->mes);

	return 0;
}

int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
				   int me_id, int pipe_id, int queue_id, int vmid)
{
	struct mes_reset_queue_input queue_input;
	int r;

	queue_input.queue_type = queue_type;
	queue_input.use_mmio = true;
	queue_input.me_id = me_id;
	queue_input.pipe_id = pipe_id;
	queue_input.queue_id = queue_id;
	queue_input.vmid = vmid;
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
			  queue_id);
	return r;
}

int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;

	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

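/*
 * Register access routed through the scheduler firmware via MES_MISC_OP_*
 * requests: reads land in a temporary writeback slot, while writes and
 * register-wait operations are one-shot misc ops. These helpers serve the
 * paths where the driver routes register access through the firmware rather
 * than plain MMIO.
 */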
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;
	uint32_t addr_offset = 0;
	uint64_t read_val_gpu_addr;
	uint32_t *read_val_ptr;

	if (amdgpu_device_wb_get(adev, &addr_offset)) {
		DRM_ERROR("critical bug! too many mes readers\n");
		goto error;
	}
	read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
	read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(read_val_ptr);

error:
	if (addr_offset)
		amdgpu_device_wb_free(adev, addr_offset);
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger() instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

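/*
 * Resolve the byte offset of a per-ring slot inside struct
 * amdgpu_mes_ctx_meta_data; _eng names the engine array (gfx, compute or
 * sdma) the ring belongs to.
 */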
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

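/*
 * Build a self-contained ring on top of a MES hardware queue: the ring
 * borrows funcs/me/pipe from the first real ring of the requested type,
 * places its buffers inside the ctx_data metadata BO and is then backed by
 * a queue created via amdgpu_mes_add_hw_queue().
 */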
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r) {
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_memory;
	}

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
clean_up_memory:
	kfree(ring);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

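/*
 * Self-test helpers: create one gang per queue type, attach a few test rings
 * to it and run the usual ring and IB tests on each of them.
 */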
static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

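/*
 * amdgpu_mes_self_test() builds a throwaway VM, process and gangs, exercises
 * ring and IB tests on MES-backed queues and tears it all down again. Note
 * that it returns 0 even when a test step fails.
 */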
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3 parts the firmware does not support mapping
		 * SDMA queues yet. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
		    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

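/*
 * Fetch the MES firmware for the given pipe. The file naming differs per
 * generation (uni_mes, mes_2/mes1, mes/mes1), with a fallback to the plain
 * _mes.bin name for the scheduler pipe on GFX11-era parts.
 */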
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	u32 *ucode_ptr;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
		   amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
				 "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
	ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
			    sizeof(union amdgpu_firmware_header));
	adev->mes.fw_version[pipe] =
		le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
	bool is_supported = false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
	    mes_rev >= 0x63)
		is_supported = true;

	return is_supported;
}

/* FIXME: node_id will be used to identify the correct MES instance in the future */
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
					    uint32_t node_id, bool enable)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	op_input.op = MES_MISC_OP_CHANGE_CONFIG;
	op_input.change_config.option.limit_single_process = enable ? 1 : 0;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes change config is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to change_config.\n");

error:
	return r;
}

int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
	int i, r = 0;

	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
		mutex_lock(&adev->enforce_isolation_mutex);
		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
			if (adev->enforce_isolation[i])
				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
			else
				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
		}
		mutex_unlock(&adev->enforce_isolation_mutex);
	}
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}