Lines Matching full:gfx
37 /* delay 0.1 second before enabling the gfx off feature */
43 * GPU GFX IP block helper functions.
51 bit += mec * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_mec_queue_to_bit()
52 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
53 bit += pipe * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
62 *queue = bit % adev->gfx.mec.num_queue_per_pipe; in amdgpu_queue_mask_bit_to_mec_queue()
63 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
64 % adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
65 *mec = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
66 / adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
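The two helpers matched above linearize a (mec, pipe, queue) triple into a flat bitmap index and invert that mapping with div/mod arithmetic. A minimal userspace sketch of the same round trip, with illustrative constants standing in for the adev->gfx.mec fields:

#include <assert.h>

#define PIPES_PER_MEC   4   /* stands in for adev->gfx.mec.num_pipe_per_mec */
#define QUEUES_PER_PIPE 8   /* stands in for adev->gfx.mec.num_queue_per_pipe */

static int mec_queue_to_bit(int mec, int pipe, int queue)
{
	/* queue varies fastest, then pipe, then mec */
	return mec * PIPES_PER_MEC * QUEUES_PER_PIPE
		+ pipe * QUEUES_PER_PIPE
		+ queue;
}

static void bit_to_mec_queue(int bit, int *mec, int *pipe, int *queue)
{
	*queue = bit % QUEUES_PER_PIPE;
	*pipe  = (bit / QUEUES_PER_PIPE) % PIPES_PER_MEC;
	*mec   = (bit / QUEUES_PER_PIPE) / PIPES_PER_MEC;
}

int main(void)
{
	int mec, pipe, queue;

	assert(mec_queue_to_bit(1, 2, 3) == 51);	/* 1*32 + 2*8 + 3 */
	bit_to_mec_queue(51, &mec, &pipe, &queue);
	assert(mec == 1 && pipe == 2 && queue == 3);
	return 0;
}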
74 adev->gfx.mec_bitmap[xcc_id].queue_bitmap); in amdgpu_gfx_is_mec_queue_enabled()
82 bit += me * adev->gfx.me.num_pipe_per_me in amdgpu_gfx_me_queue_to_bit()
83 * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
84 bit += pipe * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
94 adev->gfx.me.queue_bitmap); in amdgpu_gfx_is_me_queue_enabled()
144 return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1; in amdgpu_gfx_is_graphics_multipipe_capable()
163 return adev->gfx.mec.num_mec > 1; in amdgpu_gfx_is_compute_multipipe_capable()
173 * have more than one gfx pipe. in amdgpu_gfx_is_high_priority_graphics_queue()
176 adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) { in amdgpu_gfx_is_high_priority_graphics_queue()
181 if (ring == &adev->gfx.gfx_ring[bit]) in amdgpu_gfx_is_high_priority_graphics_queue()
194 if (adev->gfx.num_compute_rings > 1 && in amdgpu_gfx_is_high_priority_compute_queue()
195 ring == &adev->gfx.compute_ring[0]) in amdgpu_gfx_is_high_priority_compute_queue()
205 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * in amdgpu_gfx_compute_queue_acquire()
206 adev->gfx.mec.num_queue_per_pipe, in amdgpu_gfx_compute_queue_acquire()
207 adev->gfx.num_compute_rings); in amdgpu_gfx_compute_queue_acquire()
208 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; in amdgpu_gfx_compute_queue_acquire()
215 pipe = i % adev->gfx.mec.num_pipe_per_mec; in amdgpu_gfx_compute_queue_acquire()
216 queue = (i / adev->gfx.mec.num_pipe_per_mec) % in amdgpu_gfx_compute_queue_acquire()
217 adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_compute_queue_acquire()
219 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, in amdgpu_gfx_compute_queue_acquire()
220 adev->gfx.mec_bitmap[j].queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
227 set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
233 bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); in amdgpu_gfx_compute_queue_acquire()
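When more than one pipe is available, the acquire loop above assigns ring i to pipe (i % num_pipe_per_mec) first, spreading consecutive rings across pipes instead of filling one pipe's queues back to back. A small sketch of that spreading policy under illustrative constants:

#include <stdio.h>

#define PIPES_PER_MEC   4   /* illustrative, not the driver's value */
#define QUEUES_PER_PIPE 8

int main(void)
{
	int i, pipe, queue;

	for (i = 0; i < 8; i++) {
		pipe  = i % PIPES_PER_MEC;
		queue = (i / PIPES_PER_MEC) % QUEUES_PER_PIPE;
		/* bit index within one MEC's queue bitmap */
		printf("ring %d -> pipe %d queue %d (bit %d)\n",
		       i, pipe, queue, pipe * QUEUES_PER_PIPE + queue);
	}
	return 0;
}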
241 int max_queues_per_me = adev->gfx.me.num_pipe_per_me * in amdgpu_gfx_graphics_queue_acquire()
242 adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_graphics_queue_acquire()
248 pipe = i % adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_graphics_queue_acquire()
249 queue = (i / adev->gfx.me.num_pipe_per_me) % in amdgpu_gfx_graphics_queue_acquire()
250 adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_graphics_queue_acquire()
252 set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue, in amdgpu_gfx_graphics_queue_acquire()
253 adev->gfx.me.queue_bitmap); in amdgpu_gfx_graphics_queue_acquire()
257 set_bit(i, adev->gfx.me.queue_bitmap); in amdgpu_gfx_graphics_queue_acquire()
261 adev->gfx.num_gfx_rings = in amdgpu_gfx_graphics_queue_acquire()
262 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); in amdgpu_gfx_graphics_queue_acquire()
271 queue_bit = adev->gfx.mec.num_mec in amdgpu_gfx_kiq_acquire()
272 * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_kiq_acquire()
273 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_kiq_acquire()
276 if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) in amdgpu_gfx_kiq_acquire()
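The kiq_acquire matches compute the total number of MEC queue bits and then test the per-XCC bitmap; in the surrounding function the scan appears to run downward from that total, so the KIQ takes the highest queue slot not already claimed for a kernel compute queue. A hedged sketch of such a top-down free-bit search, with the bitmap simplified to a byte array:

#include <stdio.h>

#define TOTAL_QUEUE_BITS (2 * 4 * 8)	/* mec * pipes * queues, illustrative */

static int find_last_clear_bit(const unsigned char *bitmap, int nbits)
{
	int bit = nbits;

	while (--bit >= 0)
		if (!(bitmap[bit / 8] & (1u << (bit % 8))))
			return bit;	/* highest free slot */
	return -1;			/* everything taken */
}

int main(void)
{
	unsigned char bitmap[TOTAL_QUEUE_BITS / 8] = { 0 };

	bitmap[7] = 0xff;	/* pretend bits 56..63 are in use */
	printf("kiq slot: bit %d\n",
	       find_last_clear_bit(bitmap, TOTAL_QUEUE_BITS));	/* -> 55 */
	return 0;
}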
302 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_init_ring()
343 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_fini()
353 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_init()
374 /* create MQD for each compute/gfx queue */
379 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mqd_sw_init()
418 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_init()
419 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_init()
431 adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
432 if (!adev->gfx.me.mqd_backup[i]) { in amdgpu_gfx_mqd_sw_init()
441 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_init()
442 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_mqd_sw_init()
443 ring = &adev->gfx.compute_ring[j]; in amdgpu_gfx_mqd_sw_init()
455 adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
456 if (!adev->gfx.mec.mqd_backup[j]) { in amdgpu_gfx_mqd_sw_init()
470 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mqd_sw_fini()
473 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
474 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_fini()
475 kfree(adev->gfx.me.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
482 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
483 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_mqd_sw_fini()
484 ring = &adev->gfx.compute_ring[j]; in amdgpu_gfx_mqd_sw_fini()
485 kfree(adev->gfx.mec.mqd_backup[j]); in amdgpu_gfx_mqd_sw_fini()
500 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_disable_kcq()
506 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_disable_kcq()
507 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_disable_kcq()
509 &adev->gfx.compute_ring[j], in amdgpu_gfx_disable_kcq()
523 adev->gfx.num_compute_rings)) { in amdgpu_gfx_disable_kcq()
528 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_disable_kcq()
529 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_disable_kcq()
531 &adev->gfx.compute_ring[j], in amdgpu_gfx_disable_kcq()
550 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_disable_kgq()
557 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_disable_kgq()
558 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_disable_kgq()
560 &adev->gfx.gfx_ring[j], in amdgpu_gfx_disable_kgq()
570 if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev)) in amdgpu_gfx_disable_kgq()
576 adev->gfx.num_gfx_rings)) { in amdgpu_gfx_disable_kgq()
581 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_disable_kgq()
582 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_disable_kgq()
584 &adev->gfx.gfx_ring[j], in amdgpu_gfx_disable_kgq()
617 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mes_enable_kcq()
640 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mes_enable_kcq()
641 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_mes_enable_kcq()
643 &adev->gfx.compute_ring[j]); in amdgpu_gfx_mes_enable_kcq()
655 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_enable_kcq()
667 if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) in amdgpu_gfx_enable_kcq()
688 adev->gfx.num_compute_rings + in amdgpu_gfx_enable_kcq()
697 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_enable_kcq()
698 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_enable_kcq()
700 &adev->gfx.compute_ring[j]); in amdgpu_gfx_enable_kcq()
719 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_enable_kgq()
729 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_enable_kgq()
730 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_enable_kgq()
732 &adev->gfx.gfx_ring[j]); in amdgpu_gfx_enable_kgq()
734 DRM_ERROR("failed to map gfx queue\n"); in amdgpu_gfx_enable_kgq()
746 adev->gfx.num_gfx_rings); in amdgpu_gfx_enable_kgq()
753 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_enable_kgq()
754 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_enable_kgq()
756 &adev->gfx.gfx_ring[j]); in amdgpu_gfx_enable_kgq()
774 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
777 * @enable: true to enable the gfx off feature, false to disable it
779 * 1. the gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
780 * 2. other clients can send requests to disable the gfx off feature; such requests should be honored.
781 * 3. other clients can cancel their requests to disable the gfx off feature
782 * 4. other clients should not request to enable the gfx off feature before first requesting to disable it.
792 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
799 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0)) in amdgpu_gfx_off_ctrl()
802 adev->gfx.gfx_off_req_count--; in amdgpu_gfx_off_ctrl()
804 if (adev->gfx.gfx_off_req_count == 0 && in amdgpu_gfx_off_ctrl()
805 !adev->gfx.gfx_off_state) { in amdgpu_gfx_off_ctrl()
810 adev->gfx.gfx_off_state = true; in amdgpu_gfx_off_ctrl()
812 schedule_delayed_work(&adev->gfx.gfx_off_delay_work, in amdgpu_gfx_off_ctrl()
817 if (adev->gfx.gfx_off_req_count == 0) { in amdgpu_gfx_off_ctrl()
818 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); in amdgpu_gfx_off_ctrl()
820 if (adev->gfx.gfx_off_state && in amdgpu_gfx_off_ctrl()
822 adev->gfx.gfx_off_state = false; in amdgpu_gfx_off_ctrl()
824 if (adev->gfx.funcs->init_spm_golden) { in amdgpu_gfx_off_ctrl()
832 adev->gfx.gfx_off_req_count++; in amdgpu_gfx_off_ctrl()
836 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
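The gfx-off matches above implement a request-count discipline: a disable request bumps gfx_off_req_count and forces the feature off at once, while an enable request only drops the counter, and the hardware state is re-armed (via delayed work in the driver) once the counter returns to zero. A minimal sketch of that discipline, with the delayed-work step collapsed into a direct assignment:

#include <assert.h>
#include <stdbool.h>

struct gfxoff_state {
	int  req_count;		/* outstanding disable requests */
	bool feature_on;	/* current hardware state */
};

static void gfxoff_ctrl(struct gfxoff_state *s, bool enable)
{
	if (enable) {
		assert(s->req_count > 0);	/* unbalanced enable request */
		if (--s->req_count == 0)
			s->feature_on = true;	/* driver defers this via delayed work */
	} else {
		if (s->req_count++ == 0)
			s->feature_on = false;	/* first disabler turns it off */
	}
}

int main(void)
{
	struct gfxoff_state s = { .req_count = 0, .feature_on = true };

	gfxoff_ctrl(&s, false);		/* client A disables */
	gfxoff_ctrl(&s, false);		/* client B disables */
	gfxoff_ctrl(&s, true);		/* A cancels its request: still off */
	assert(!s.feature_on);
	gfxoff_ctrl(&s, true);		/* B cancels: feature comes back */
	assert(s.feature_on && s.req_count == 0);
	return 0;
}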
843 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_set_gfx_off_residency()
847 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_set_gfx_off_residency()
856 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_residency()
860 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_residency()
869 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_entrycount()
873 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_entrycount()
883 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
887 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
910 if (adev->gfx.cp_ecc_error_irq.funcs) { in amdgpu_gfx_ras_late_init()
911 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); in amdgpu_gfx_ras_late_init()
930 /* adev->gfx.ras is NULL, which means gfx does not in amdgpu_gfx_ras_sw_init()
933 if (!adev->gfx.ras) in amdgpu_gfx_ras_sw_init()
936 ras = adev->gfx.ras; in amdgpu_gfx_ras_sw_init()
940 dev_err(adev->dev, "Failed to register gfx ras block!\n"); in amdgpu_gfx_ras_sw_init()
944 strcpy(ras->ras_block.ras_comm.name, "gfx"); in amdgpu_gfx_ras_sw_init()
947 adev->gfx.ras_if = &ras->ras_block.ras_comm; in amdgpu_gfx_ras_sw_init()
949 /* If no dedicated ras_late_init function is defined, use the default gfx ras_late_init */ in amdgpu_gfx_ras_sw_init()
963 if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler) in amdgpu_gfx_poison_consumption_handler()
964 return adev->gfx.ras->poison_consumption_handler(adev, entry); in amdgpu_gfx_poison_consumption_handler()
981 if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops && in amdgpu_gfx_process_ras_data_cb()
982 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count) in amdgpu_gfx_process_ras_data_cb()
983 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); in amdgpu_gfx_process_ras_data_cb()
993 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_cp_ecc_error_irq()
1014 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; in amdgpu_gfx_ras_error_func()
1032 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_kiq_rreg()
1103 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_kiq_wreg()
1185 adev->gfx.pfp_fw->data; in amdgpu_gfx_cp_init_microcode()
1186 adev->gfx.pfp_fw_version = in amdgpu_gfx_cp_init_microcode()
1188 adev->gfx.pfp_feature_version = in amdgpu_gfx_cp_init_microcode()
1190 ucode_fw = adev->gfx.pfp_fw; in amdgpu_gfx_cp_init_microcode()
1195 adev->gfx.pfp_fw->data; in amdgpu_gfx_cp_init_microcode()
1196 adev->gfx.pfp_fw_version = in amdgpu_gfx_cp_init_microcode()
1198 adev->gfx.pfp_feature_version = in amdgpu_gfx_cp_init_microcode()
1200 ucode_fw = adev->gfx.pfp_fw; in amdgpu_gfx_cp_init_microcode()
1206 adev->gfx.pfp_fw->data; in amdgpu_gfx_cp_init_microcode()
1207 ucode_fw = adev->gfx.pfp_fw; in amdgpu_gfx_cp_init_microcode()
1212 adev->gfx.me_fw->data; in amdgpu_gfx_cp_init_microcode()
1213 adev->gfx.me_fw_version = in amdgpu_gfx_cp_init_microcode()
1215 adev->gfx.me_feature_version = in amdgpu_gfx_cp_init_microcode()
1217 ucode_fw = adev->gfx.me_fw; in amdgpu_gfx_cp_init_microcode()
1222 adev->gfx.me_fw->data; in amdgpu_gfx_cp_init_microcode()
1223 adev->gfx.me_fw_version = in amdgpu_gfx_cp_init_microcode()
1225 adev->gfx.me_feature_version = in amdgpu_gfx_cp_init_microcode()
1227 ucode_fw = adev->gfx.me_fw; in amdgpu_gfx_cp_init_microcode()
1233 adev->gfx.me_fw->data; in amdgpu_gfx_cp_init_microcode()
1234 ucode_fw = adev->gfx.me_fw; in amdgpu_gfx_cp_init_microcode()
1239 adev->gfx.ce_fw->data; in amdgpu_gfx_cp_init_microcode()
1240 adev->gfx.ce_fw_version = in amdgpu_gfx_cp_init_microcode()
1242 adev->gfx.ce_feature_version = in amdgpu_gfx_cp_init_microcode()
1244 ucode_fw = adev->gfx.ce_fw; in amdgpu_gfx_cp_init_microcode()
1249 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1250 adev->gfx.mec_fw_version = in amdgpu_gfx_cp_init_microcode()
1252 adev->gfx.mec_feature_version = in amdgpu_gfx_cp_init_microcode()
1254 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1260 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1261 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1266 adev->gfx.mec2_fw->data; in amdgpu_gfx_cp_init_microcode()
1267 adev->gfx.mec2_fw_version = in amdgpu_gfx_cp_init_microcode()
1269 adev->gfx.mec2_feature_version = in amdgpu_gfx_cp_init_microcode()
1271 ucode_fw = adev->gfx.mec2_fw; in amdgpu_gfx_cp_init_microcode()
1277 adev->gfx.mec2_fw->data; in amdgpu_gfx_cp_init_microcode()
1278 ucode_fw = adev->gfx.mec2_fw; in amdgpu_gfx_cp_init_microcode()
1283 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1284 adev->gfx.mec_fw_version = in amdgpu_gfx_cp_init_microcode()
1286 adev->gfx.mec_feature_version = in amdgpu_gfx_cp_init_microcode()
1288 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1296 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1297 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1315 return !(xcc_id % (adev->gfx.num_xcc_per_xcp ? in amdgpu_gfx_is_master_xcc()
1316 adev->gfx.num_xcc_per_xcp : 1)); in amdgpu_gfx_is_master_xcc()
1342 num_xcc = NUM_XCC(adev->gfx.xcc_mask); in amdgpu_gfx_set_compute_partition()
1423 dev_err(adev->dev, "Failed setting up GFX kernel entity.\n"); in amdgpu_gfx_run_cleaner_shader_job()
1458 int num_xcc = NUM_XCC(adev->gfx.xcc_mask); in amdgpu_gfx_run_cleaner_shader()
1463 if (adev->gfx.num_xcc_per_xcp) in amdgpu_gfx_run_cleaner_shader()
1464 num_xcc_to_clear = adev->gfx.num_xcc_per_xcp; in amdgpu_gfx_run_cleaner_shader()
1469 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_run_cleaner_shader()
1470 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; in amdgpu_gfx_run_cleaner_shader()
1488 * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
1554 * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
1590 * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
1667 return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset); in amdgpu_gfx_get_gfx_reset_mask()
1680 return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset); in amdgpu_gfx_get_compute_reset_mask()
1753 if (adev->gfx.enable_cleaner_shader) in amdgpu_gfx_sysfs_isolation_shader_init()
1762 if (adev->gfx.enable_cleaner_shader) in amdgpu_gfx_sysfs_isolation_shader_fini()
1773 if (adev->gfx.num_gfx_rings) { in amdgpu_gfx_sysfs_reset_mask_init()
1779 if (adev->gfx.num_compute_rings) { in amdgpu_gfx_sysfs_reset_mask_init()
1793 if (adev->gfx.num_gfx_rings) in amdgpu_gfx_sysfs_reset_mask_fini()
1796 if (adev->gfx.num_compute_rings) in amdgpu_gfx_sysfs_reset_mask_fini()
1833 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_cleaner_shader_sw_init()
1838 &adev->gfx.cleaner_shader_obj, in amdgpu_gfx_cleaner_shader_sw_init()
1839 &adev->gfx.cleaner_shader_gpu_addr, in amdgpu_gfx_cleaner_shader_sw_init()
1840 (void **)&adev->gfx.cleaner_shader_cpu_ptr); in amdgpu_gfx_cleaner_shader_sw_init()
1845 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_cleaner_shader_sw_fini()
1848 amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj, in amdgpu_gfx_cleaner_shader_sw_fini()
1849 &adev->gfx.cleaner_shader_gpu_addr, in amdgpu_gfx_cleaner_shader_sw_fini()
1850 (void **)&adev->gfx.cleaner_shader_cpu_ptr); in amdgpu_gfx_cleaner_shader_sw_fini()
1857 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_cleaner_shader_init()
1860 if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr) in amdgpu_gfx_cleaner_shader_init()
1861 memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr, in amdgpu_gfx_cleaner_shader_init()
1898 mutex_lock(&adev->gfx.kfd_sch_mutex); in amdgpu_gfx_kfd_sch_ctrl()
1905 if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) { in amdgpu_gfx_kfd_sch_ctrl()
1910 adev->gfx.kfd_sch_req_count[idx]--; in amdgpu_gfx_kfd_sch_ctrl()
1912 if (adev->gfx.kfd_sch_req_count[idx] == 0 && in amdgpu_gfx_kfd_sch_ctrl()
1913 adev->gfx.kfd_sch_inactive[idx]) { in amdgpu_gfx_kfd_sch_ctrl()
1914 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, in amdgpu_gfx_kfd_sch_ctrl()
1915 msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx])); in amdgpu_gfx_kfd_sch_ctrl()
1918 if (adev->gfx.kfd_sch_req_count[idx] == 0) { in amdgpu_gfx_kfd_sch_ctrl()
1919 cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work); in amdgpu_gfx_kfd_sch_ctrl()
1920 if (!adev->gfx.kfd_sch_inactive[idx]) { in amdgpu_gfx_kfd_sch_ctrl()
1922 adev->gfx.kfd_sch_inactive[idx] = true; in amdgpu_gfx_kfd_sch_ctrl()
1926 adev->gfx.kfd_sch_req_count[idx]++; in amdgpu_gfx_kfd_sch_ctrl()
1930 mutex_unlock(&adev->gfx.kfd_sch_mutex); in amdgpu_gfx_kfd_sch_ctrl()
1939 * It counts the number of emitted fences for each GFX and compute ring. If there
1962 if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id) in amdgpu_gfx_enforce_isolation_handler()
1963 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]); in amdgpu_gfx_enforce_isolation_handler()
1966 if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id) in amdgpu_gfx_enforce_isolation_handler()
1967 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]); in amdgpu_gfx_enforce_isolation_handler()
1971 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, in amdgpu_gfx_enforce_isolation_handler()
1976 WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]); in amdgpu_gfx_enforce_isolation_handler()
1977 WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]); in amdgpu_gfx_enforce_isolation_handler()
1979 adev->gfx.kfd_sch_inactive[idx] = false; in amdgpu_gfx_enforce_isolation_handler()
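The handler matches above follow a drain-then-flip pattern: while any GFX or compute ring still has emitted fences outstanding, the delayed work re-arms itself; only once everything has retired does it mark KFD scheduling active again. A stubbed sketch of that pattern (the fence count and work queue are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static int fences_left = 3;	/* stub for summing amdgpu_fence_count_emitted() */
static bool kfd_sch_inactive = true;

static void reschedule_self(void)	/* stands in for schedule_delayed_work() */
{
	printf("fences pending, re-arming worker\n");
}

static void isolation_handler(void)
{
	if (fences_left) {
		reschedule_self();	/* rings still busy: try again later */
		return;
	}
	kfd_sch_inactive = false;	/* all work drained: restore KFD scheduling */
	printf("rings idle, KFD scheduling restored\n");
}

int main(void)
{
	isolation_handler();	/* still busy */
	fences_left = 0;
	isolation_handler();	/* drained */
	return 0;
}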
2006 if (!adev->gfx.enforce_isolation_jiffies[idx]) { in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2007 adev->gfx.enforce_isolation_jiffies[idx] = jiffies; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2008 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2013 if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) { in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2014 cjiffies -= adev->gfx.enforce_isolation_jiffies[idx]; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2019 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2022 adev->gfx.enforce_isolation_time[idx] = in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2027 adev->gfx.enforce_isolation_jiffies[idx] = jiffies; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2031 adev->gfx.enforce_isolation_jiffies[idx] = jiffies; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2032 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
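The wait_for_kfd matches above keep per-partition slice bookkeeping: a jiffies timestamp marks when the slice began, and later calls shrink the allowed period by the time already consumed, resetting to a full GFX_SLICE_PERIOD_MS once the slice is spent. A userspace sketch of that bookkeeping, with a monotonic millisecond clock standing in for jiffies and an illustrative 250 ms period:

#include <stdio.h>
#include <time.h>

#define SLICE_MS 250	/* illustrative stand-in for GFX_SLICE_PERIOD_MS */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static long slice_start_ms;	/* 0 means "no slice in progress" */

static long remaining_slice_ms(void)
{
	long elapsed;

	if (!slice_start_ms) {
		slice_start_ms = now_ms();
		return SLICE_MS;		/* fresh slice */
	}
	elapsed = now_ms() - slice_start_ms;
	if (elapsed >= SLICE_MS) {
		slice_start_ms = now_ms();	/* slice exhausted: start over */
		return SLICE_MS;
	}
	return SLICE_MS - elapsed;		/* partial slice left */
}

int main(void)
{
	printf("remaining: %ld ms\n", remaining_slice_ms());
	return 0;
}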
2045 * Ring begin_use helper implementation for gfx which serializes access to the
2046 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2056 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_enforce_isolation_ring_begin_use()
2085 * Ring end_use helper implementation for gfx which serializes access to the
2086 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2096 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_enforce_isolation_ring_end_use()
2119 * debugfs to enable/disable gfx job submission to a specific core.
2132 mask = (1ULL << adev->gfx.num_gfx_rings) - 1; in amdgpu_debugfs_gfx_sched_mask_set()
2136 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { in amdgpu_debugfs_gfx_sched_mask_set()
2137 ring = &adev->gfx.gfx_ring[i]; in amdgpu_debugfs_gfx_sched_mask_set()
2157 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { in amdgpu_debugfs_gfx_sched_mask_get()
2158 ring = &adev->gfx.gfx_ring[i]; in amdgpu_debugfs_gfx_sched_mask_get()
2180 if (!(adev->gfx.num_gfx_rings > 1)) in amdgpu_debugfs_gfx_sched_mask_init()
2202 mask = (1ULL << adev->gfx.num_compute_rings) - 1; in amdgpu_debugfs_compute_sched_mask_set()
2206 for (i = 0; i < adev->gfx.num_compute_rings; ++i) { in amdgpu_debugfs_compute_sched_mask_set()
2207 ring = &adev->gfx.compute_ring[i]; in amdgpu_debugfs_compute_sched_mask_set()
2228 for (i = 0; i < adev->gfx.num_compute_rings; ++i) { in amdgpu_debugfs_compute_sched_mask_get()
2229 ring = &adev->gfx.compute_ring[i]; in amdgpu_debugfs_compute_sched_mask_get()
2251 if (!(adev->gfx.num_compute_rings > 1)) in amdgpu_debugfs_compute_sched_mask_init()
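Both debugfs mask setters above build a validity mask of (1ULL << num_rings) - 1 and gate ring i on bit i of the written value, rejecting any value that would leave no ring schedulable. A compact sketch of that validation and per-ring toggle (the ring count and state array are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 2	/* illustrative ring count */

static bool ring_ready[NUM_RINGS];

static int sched_mask_set(uint64_t val)
{
	uint64_t mask = (1ULL << NUM_RINGS) - 1;
	int i;

	if ((val & mask) == 0)
		return -EINVAL;		/* would disable every ring */

	for (i = 0; i < NUM_RINGS; i++)
		ring_ready[i] = !!(val & (1ULL << i));
	return 0;
}

int main(void)
{
	printf("set 0x1 -> %d, ring0=%d ring1=%d\n",
	       sched_mask_set(0x1), ring_ready[0], ring_ready[1]);
	printf("set 0x0 -> %d (rejected)\n", sched_mask_set(0x0));
	return 0;
}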