Lines matching full:pm in drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c, grouped below by function. The first matched line of each group is the declaration of pm: the packet_manager argument in every function except pm_debugfs_runlist(), where pm is a local. Original file line numbers are kept on the left.

In pm_calc_rlib_size():
  45  static void pm_calc_rlib_size(struct packet_manager *pm,
  52      struct kfd_node *node = pm->dqm->dev;
  55      process_count = pm->dqm->processes_count;
  56      queue_count = pm->dqm->active_queue_count;
  57      compute_queue_count = pm->dqm->active_cp_queue_count;
  58      gws_queue_count = pm->dqm->gws_queue_count;
  72      if (compute_queue_count > get_cp_queues_num(pm->dqm))
  80      map_queue_size = pm->pmf->map_queues_size;
  82      *rlib_size = process_count * pm->pmf->map_process_size +
  90      *rlib_size += pm->pmf->runlist_size;
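
The matched lines show the size arithmetic: one map_process packet per process plus one map_queues packet per active queue (line 82), and, when the scheduler is over-subscribed, room for one extra runlist packet so the list can chain (line 90). A minimal userspace sketch of that arithmetic follows; the packet sizes are hypothetical stand-ins for the pm->pmf fields, not real values.

    #include <stdbool.h>

    /* Hypothetical packet sizes; the kernel reads these from pm->pmf. */
    enum {
        MAP_PROCESS_SIZE = 80,  /* stand-in for pmf->map_process_size */
        MAP_QUEUES_SIZE  = 64,  /* stand-in for pmf->map_queues_size  */
        RUNLIST_SIZE     = 24,  /* stand-in for pmf->runlist_size     */
    };

    static unsigned int calc_rlib_size(unsigned int process_count,
                                       unsigned int queue_count,
                                       bool over_subscription)
    {
        unsigned int size = process_count * MAP_PROCESS_SIZE +
                            queue_count * MAP_QUEUES_SIZE;

        /* An over-subscribed runlist ends in a chaining runlist packet. */
        if (over_subscription)
            size += RUNLIST_SIZE;
        return size;
    }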

In pm_allocate_runlist_ib():
  95  static int pm_allocate_runlist_ib(struct packet_manager *pm,
 101      struct kfd_node *node = pm->dqm->dev;
 105      if (WARN_ON(pm->allocated))
 108      pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
 110      mutex_lock(&pm->lock);
 112      retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
 119      *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
 120      *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
 123      pm->allocated = true;
 126      mutex_unlock(&pm->lock);

In pm_create_runlist_ib():
 130  static int pm_create_runlist_ib(struct packet_manager *pm,
 137      struct kfd_node *node = pm->dqm->dev;
 148      retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
 154      pm->ib_size_bytes = alloc_size_bytes;
 157          pm->dqm->processes_count, pm->dqm->active_queue_count);
 163      if (processes_mapped >= pm->dqm->processes_count) {
 165          pm_release_ib(pm);
 169      retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
 174      inc_wptr(&rl_wptr, pm->pmf->map_process_size,
 185      retval = pm->pmf->map_queues(pm,
 193          pm->pmf->map_queues_size,
 205      retval = pm->pmf->map_queues(pm,
 214          pm->pmf->map_queues_size,
 222      if (!pm->is_over_subscription)
 231      retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
 236      pm->is_over_subscription = !!is_over_subscription;
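
pm_create_runlist_ib() fills the IB by advancing a dword write pointer: a map_process packet per process (line 169), a map_queues packet per queue (lines 185, 205), and a trailing chaining runlist packet when over-subscribed (lines 222, 231). The inc_wptr() helper used at lines 174/193/214 is not shown by the search; a guess at its shape, treating IB overflow as a bug rather than a recoverable error:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch only: wptr counts dwords, the increment is given in bytes,
     * and the advance must never run past the end of the runlist IB. */
    static void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
                         unsigned int buffer_size_bytes)
    {
        unsigned int next = *wptr + increment_bytes / sizeof(uint32_t);

        assert(next * sizeof(uint32_t) <= buffer_size_bytes);
        *wptr = next;
    }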

In pm_init():
 245  int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 258      pm->pmf = &kfd_vi_pm_funcs;
 265      pm->pmf = &kfd_aldebaran_pm_funcs;
 267      pm->pmf = &kfd_v9_pm_funcs;
 275      pm->dqm = dqm;
 276      mutex_init(&pm->lock);
 277      pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
 278      if (!pm->priv_queue) {
 279          mutex_destroy(&pm->lock);
 282      pm->allocated = false;
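
pm_init() binds the per-ASIC packet-format table once: VI-era parts get kfd_vi_pm_funcs (line 258), Aldebaran gets its own table (line 265), and the remaining GFX9+ parts fall through to kfd_v9_pm_funcs (line 267). An illustrative reduction of that dispatch; the enum and selector below are invented, the kernel keys off the real ASIC/IP version:

    /* Hypothetical families; the kernel switches on the actual ASIC id. */
    enum asic_family { ASIC_VI, ASIC_ALDEBARAN, ASIC_GFX9_PLUS };

    struct packet_manager_funcs;  /* per-ASIC packet builders */
    extern const struct packet_manager_funcs kfd_vi_pm_funcs,
                                             kfd_aldebaran_pm_funcs,
                                             kfd_v9_pm_funcs;

    static const struct packet_manager_funcs *select_pmf(enum asic_family f)
    {
        switch (f) {
        case ASIC_VI:        return &kfd_vi_pm_funcs;
        case ASIC_ALDEBARAN: return &kfd_aldebaran_pm_funcs;
        default:             return &kfd_v9_pm_funcs;
        }
    }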

In pm_uninit():
 287  void pm_uninit(struct packet_manager *pm)
 289      mutex_destroy(&pm->lock);
 290      kernel_queue_uninit(pm->priv_queue);
 291      pm->priv_queue = NULL;

In pm_send_set_resources():
 294  int pm_send_set_resources(struct packet_manager *pm,
 297      struct kfd_node *node = pm->dqm->dev;
 302      size = pm->pmf->set_resources_size;
 303      mutex_lock(&pm->lock);
 304      kq_acquire_packet_buffer(pm->priv_queue,
 313      retval = pm->pmf->set_resources(pm, buffer, res);
 315      retval = kq_submit_packet(pm->priv_queue);
 317      kq_rollback_packet(pm->priv_queue);
 320      mutex_unlock(&pm->lock);
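
pm_send_set_resources() above and the pm_send_*/pm_update_* helpers below all share one shape: take pm->lock, reserve packet space on the HIQ kernel queue, let the per-ASIC pmf callback build the packet in place, then submit it, or roll the queue's write pointer back if packet construction failed. A stub-typed sketch of that pattern; the kq_* names below are stand-ins, not the KFD API:

    #include <errno.h>
    #include <stdint.h>

    struct kq;  /* stand-in for the HIQ kernel queue */
    extern int  kq_acquire(struct kq *kq, unsigned int dwords, uint32_t **buf);
    extern int  kq_submit(struct kq *kq);
    extern void kq_rollback(struct kq *kq);

    /* Generic helper: build() fills the reserved packet buffer. */
    static int pm_send(struct kq *kq, unsigned int size_bytes,
                       int (*build)(uint32_t *buf, void *arg), void *arg)
    {
        uint32_t *buffer;
        int retval;

        /* the kernel holds pm->lock across this whole sequence */
        retval = kq_acquire(kq, size_bytes / sizeof(uint32_t), &buffer);
        if (retval || !buffer)
            return -ENOMEM;

        retval = build(buffer, arg);  /* e.g. pm->pmf->set_resources() */
        if (!retval)
            retval = kq_submit(kq);   /* ring the doorbell */
        else
            kq_rollback(kq);          /* undo the reservation */
        return retval;
    }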

In pm_send_runlist():
 325  int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 332      retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
 339      packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
 340      mutex_lock(&pm->lock);
 342      retval = kq_acquire_packet_buffer(pm->priv_queue,
 347      retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
 352      retval = kq_submit_packet(pm->priv_queue);
 354      mutex_unlock(&pm->lock);
 359      kq_rollback_packet(pm->priv_queue);
 361      mutex_unlock(&pm->lock);
 363      pm_release_ib(pm);
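
pm_send_runlist() is the one sender with extra cleanup: it first builds the runlist IB (line 332), then submits a small runlist packet from the HIQ that points at the IB's GPU address (lines 342 and 347); if reservation, packet construction, or submission fails, it rolls the queue back (line 359) and frees the IB through pm_release_ib() (line 363). The control flow, stubbed:

    #include <stdint.h>

    struct pm_stub;  /* stand-in for struct packet_manager */
    extern int  create_runlist_ib(struct pm_stub *pm, uint64_t *ib_gpu_addr);
    extern int  submit_runlist_packet(struct pm_stub *pm, uint64_t ib_gpu_addr);
    extern void release_ib(struct pm_stub *pm);

    static int send_runlist(struct pm_stub *pm)
    {
        uint64_t ib_gpu_addr;
        int retval;

        retval = create_runlist_ib(pm, &ib_gpu_addr);
        if (retval)
            return retval;

        retval = submit_runlist_packet(pm, ib_gpu_addr);
        if (retval)
            release_ib(pm);  /* don't leak the IB on failure */
        return retval;
    }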

In pm_send_query_status():
 367  int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 370      struct kfd_node *node = pm->dqm->dev;
 378      size = pm->pmf->query_status_size;
 379      mutex_lock(&pm->lock);
 380      kq_acquire_packet_buffer(pm->priv_queue,
 388      retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
 390      retval = kq_submit_packet(pm->priv_queue);
 392      kq_rollback_packet(pm->priv_queue);
 395      mutex_unlock(&pm->lock);

In pm_update_grace_period():
 399  int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
 401      struct kfd_node *node = pm->dqm->dev;
 406      size = pm->pmf->set_grace_period_size;
 408      mutex_lock(&pm->lock);
 411      kq_acquire_packet_buffer(pm->priv_queue,
 422      retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
 424      retval = kq_submit_packet(pm->priv_queue);
 426      kq_rollback_packet(pm->priv_queue);
 430      mutex_unlock(&pm->lock);

In pm_send_unmap_queue():
 434  int pm_send_unmap_queue(struct packet_manager *pm,
 438      struct kfd_node *node = pm->dqm->dev;
 443      size = pm->pmf->unmap_queues_size;
 444      mutex_lock(&pm->lock);
 445      kq_acquire_packet_buffer(pm->priv_queue,
 453      retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
 455      retval = kq_submit_packet(pm->priv_queue);
 457      kq_rollback_packet(pm->priv_queue);
 460      mutex_unlock(&pm->lock);

In pm_release_ib():
 464  void pm_release_ib(struct packet_manager *pm)
 466      mutex_lock(&pm->lock);
 467      if (pm->allocated) {
 468          kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
 469          pm->allocated = false;
 471      mutex_unlock(&pm->lock);
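
pm_release_ib() is the counterpart of pm_allocate_runlist_ib(): because line 123 sets pm->allocated and line 469 clears it under the same lock, the free is idempotent, so error paths such as lines 165 and 363 can call it without tracking whether an IB was ever handed out. The flag-guarded pairing in sketch form (stub types again):

    #include <stdbool.h>

    struct ib_obj;  /* stand-in for the GTT sub-allocation */
    struct pm_stub { bool allocated; struct ib_obj *ib_buffer_obj; };
    extern void gtt_sa_free(struct ib_obj *obj);  /* cf. kfd_gtt_sa_free() */

    /* Safe on any path: frees at most once per allocation. */
    static void release_ib(struct pm_stub *pm)
    {
        /* pm->lock is held here in the kernel version */
        if (pm->allocated) {
            gtt_sa_free(pm->ib_buffer_obj);
            pm->allocated = false;
        }
    }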

In pm_debugfs_runlist():
 478      struct packet_manager *pm = data;
 480      mutex_lock(&pm->lock);
 482      if (!pm->allocated) {
 488          pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
 491      mutex_unlock(&pm->lock);

In pm_debugfs_hang_hws():
 495  int pm_debugfs_hang_hws(struct packet_manager *pm)
 497      struct kfd_node *node = pm->dqm->dev;
 502      if (!pm->priv_queue)
 505      size = pm->pmf->query_status_size;
 506      mutex_lock(&pm->lock);
 507      kq_acquire_packet_buffer(pm->priv_queue,
 515      kq_submit_packet(pm->priv_queue);
 521      mutex_unlock(&pm->lock);