// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"

static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

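/*
 * kfd_process_dequeue_from_device - Remove all of the process's queues from
 * one device via the DQM's process_termination op. On MES-enabled devices it
 * also flushes the shader debugger context, provided the MES process context
 * was actually set up. Idempotent through pdd->already_dequeued.
 */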
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;
	/* The MES context flush needs to filter out the case in which the
	 * KFD process was created without setting up the MES context and
	 * queue for creating a compute queue.
	 */
	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
	    down_read_trylock(&dev->adev->reset_domain->sem)) {
		amdgpu_mes_flush_shader_debugger(dev->adev,
						 pdd->proc_ctx_gpu_addr);
		up_read(&dev->adev->reset_domain->sem);
	}
	pdd->already_dequeued = true;
}

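/*
 * pqm_set_gws - Attach GWS (global wave sync) resources to the queue with the
 * given qid, or detach them when @gws is NULL. Only one queue per process may
 * hold GWS at a time; the change is propagated through a DQM queue update.
 */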
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct mqd_update_info minfo = {0};
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only allow one queue per process to have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
	     KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
	     KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) &&
	    !dev->kfd->shared_resources.enable_mes) {
		if (gws)
			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
				gws, &mem);
		else
			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
				pqn->q->gws);
		if (unlikely(ret))
			return ret;
		pqn->q->gws = mem;
	} else {
		/*
		 * Intentionally set GWS to a non-NULL value
		 * for devices that do not use GWS for global wave
		 * synchronization but require the formality
		 * of setting GWS for cooperative groups.
		 */
		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
	}

	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
	minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, &minfo);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

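/*
 * pqm_clean_queue_resource - Release per-queue device resources: drop the
 * process's GWS allocation if this queue held it, and on MES-enabled devices
 * free the gang context BO and the GART mapping of the wptr BO.
 */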
static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
				     struct process_queue_node *pqn)
{
	struct kfd_node *dev;
	struct kfd_process_device *pdd;

	dev = pqn->q->device;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return;
	}

	if (pqn->q->gws) {
		if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
		    !dev->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_remove_gws_from_process(
				pqm->process->kgd_process_info, pqn->q->gws);
		pdd->qpd.num_gws = 0;
	}

	if (dev->kfd->shared_resources.enable_mes) {
		amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
		amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart);
	}
}

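/*
 * pqm_uninit - Tear down the process queue manager: release buffers and
 * per-queue device resources for every remaining queue, free the queue nodes
 * and the queue slot bitmap. Called when the process is being destroyed.
 */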
void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
										     pqm->process);
			if (pdd) {
				kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
				kfd_queue_release_buffers(pdd, &pqn->q->properties);
			} else {
				WARN_ON(!pdd);
			}
			pqm_clean_queue_resource(pqm, pqn);
		}

		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

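/*
 * init_user_queue - Allocate and initialize a user-mode queue structure for
 * the given device and properties. On MES-enabled devices this also allocates
 * the gang context BO and, for MES API version 2 and later, maps the wptr BO
 * to GART so MES can track work on unmapped, oversubscribed queues.
 */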
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_node *dev, struct queue **q,
				struct queue_properties *q_properties,
				unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_GANG_CTX_SIZE,
						&(*q)->gang_ctx_bo,
						&(*q)->gang_ctx_gpu_addr,
						&(*q)->gang_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

		/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
		 * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
		 */
		if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
		    >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
			if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
				pr_err("Queue memory allocated to wrong device\n");
				retval = -EINVAL;
				goto free_gang_ctx_bo;
			}

			retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
								  &(*q)->wptr_bo_gart);
			if (retval) {
				pr_err("Failed to map wptr bo to GART\n");
				goto free_gang_ctx_bo;
			}
		}
	}

	pr_debug("PQM After init queue\n");
	return 0;

free_gang_ctx_bo:
	amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}

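/*
 * pqm_create_queue - Create a compute, SDMA or DIQ queue for the process on
 * @dev: assign (or, for CRIU restore, reclaim) a qid, register the process
 * with the DQM on its first queue, and hand the queue to the DQM for
 * creation. On success, optionally returns the doorbell offset within the
 * doorbell page through @p_doorbell_offset_in_process.
 */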
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * On GFX 9.4.3/9.4.4/9.5.0, increase the number of queues that
	 * can be created to 255. There is no HWS limit on these parts.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
	    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
	    KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create a DIQ, the is_debug flag
	 * is not set yet, hence we also check the queue type.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->kfd->device_info.max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;

	/* Register process if this is the first queue */
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	/* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */
	if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						     AMDGPU_MES_PROC_CTX_SIZE,
						     &pdd->proc_ctx_bo,
						     &pdd->proc_ctx_gpu_addr,
						     &pdd->proc_ctx_cpu_ptr,
						     false);
		if (retval) {
			dev_err(dev->adev->dev, "failed to allocate process context bo\n");
			return retval;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
	case KFD_QUEUE_TYPE_SDMA_BY_ENG_ID:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether a SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = kfd_process_drain_interrupts(pdd);
		if (retval)
			break;

		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process) {
		/* Return the doorbell offset within the doorbell page
		 * to the caller so it can be passed up to user mode
		 * (in bytes).
		 * relative doorbell index = Absolute doorbell index -
		 * absolute index of first doorbell in the page.
		 */
		uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
								       pdd->qpd.proc_doorbells,
								       0,
								       pdd->dev->kfd->device_info.doorbell_size);

		*p_doorbell_offset_in_process = (q->properties.doorbell_off
						- first_db_index) * sizeof(uint32_t);
	}

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq);
	kfree(pqn);
err_allocate_pqn:
	/* If the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

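/*
 * pqm_destroy_queue - Destroy the queue with the given qid: tear down a DIQ
 * through the kernel queue path, or unreference and release buffers and
 * device resources for a user queue, then free the node and its qid slot.
 * Unregisters the process from the DQM when its last queue goes away.
 */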
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;

	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq);
	}

	if (pqn->q) {
		retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
		if (retval)
			goto err_destroy_queue;

		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME && retval != -EIO)
				goto err_destroy_queue;
		}
		kfd_procfs_del_queue(pqn->q);
		kfd_queue_release_buffers(pdd, &pqn->q->properties);
		pqm_clean_queue_resource(pqm, pqn);
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

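/*
 * pqm_update_queue_properties - Update ring address, size, percentage,
 * priority and target XCC of an existing queue and push the change to the
 * DQM. An update with a NULL ring address is used to disable the queue.
 */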
int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/*
	 * Update with NULL ring address is used to disable the queue
	 */
	if (p->queue_address && p->queue_size) {
		struct kfd_process_device *pdd;
		struct amdgpu_vm *vm;
		struct queue *q = pqn->q;
		int err;

		pdd = kfd_get_process_device_data(q->device, q->process);
		if (!pdd)
			return -ENODEV;
		vm = drm_priv_to_vm(pdd->drm_priv);
		err = amdgpu_bo_reserve(vm->root.bo, false);
		if (err)
			return err;

		if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo,
					 p->queue_size)) {
			pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
				 p->queue_address, p->queue_size);
			amdgpu_bo_unreserve(vm->root.bo);
			return -EFAULT;
		}

		kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
		kfd_queue_buffer_put(&pqn->q->properties.ring_bo);
		amdgpu_bo_unreserve(vm->root.bo);

		pqn->q->properties.ring_bo = p->ring_bo;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}

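/*
 * pqm_update_mqd - Apply MQD-level updates (e.g. a CU mask) to an existing
 * queue. User CU masks are rejected while the debugger workaround mask is
 * active, and on GFX10 and later the mask must enable CUs in adjacent pairs
 * because those ASICs group CUs into WGPs.
 */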
int pqm_update_mqd(struct process_queue_manager *pqm,
				unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements so deny user mask */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
	if (minfo && minfo->cu_mask.ptr &&
			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

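/*
 * pqm_get_queue_snapshot - Copy a snapshot entry for each user-mode queue of
 * the process into the caller-supplied user buffer, up to the capacity passed
 * in via @num_qss_entries, and return the total queue count and the entry
 * size actually copied.
 */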
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}

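/*
 * criu_checkpoint_queue - Fill one CRIU queue private-data record from the
 * queue's properties and checkpoint its MQD and control stack into the
 * buffer that immediately follows the record.
 */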
static int criu_checkpoint_queue(struct kfd_process_device *pdd,
			   struct queue *q,
			   struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id =  q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
				   uint8_t __user *user_priv,
				   unsigned int *q_index,
				   uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {

			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
					      priv_data_offset);

		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}

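/*
 * kfd_criu_restore_queue - Restore one queue from CRIU private data: copy the
 * queue record, MQD and control stack from the user buffer, rebuild the queue
 * properties and recreate the queue via pqm_create_queue(), re-attaching GWS
 * if the checkpoint recorded it.
 */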
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);
	kfree(q_extra_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
						       pqn->q, mqd_size,
						       ctl_stack_size);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

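/*
 * pqm_debugfs_mqds - debugfs helper that dumps the MQD of every queue owned
 * by the process, one copy per XCC for compute queues on multi-XCC devices.
 */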
int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				num_xccs = NUM_XCC(q->device->xcc_mask);
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
			size = mqd_mgr->mqd_stride(mqd_mgr,
							&q->properties);
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
		"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif