Lines Matching full:gfx (hits from the amdgpu gfx_v12_0 driver; each entry gives the source line number, the matching line, and the enclosing function)

40 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
177 /* gfx queue registers */
310 if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { in gfx_v12_0_kiq_unmap_queues()
378 adev->gfx.kiq[0].pmf = &gfx_v12_0_kiq_pm4_funcs; in gfx_v12_0_set_kiq_pm4_funcs()
526 amdgpu_ucode_release(&adev->gfx.pfp_fw); in gfx_v12_0_free_microcode()
527 amdgpu_ucode_release(&adev->gfx.me_fw); in gfx_v12_0_free_microcode()
528 amdgpu_ucode_release(&adev->gfx.rlc_fw); in gfx_v12_0_free_microcode()
529 amdgpu_ucode_release(&adev->gfx.mec_fw); in gfx_v12_0_free_microcode()
531 kfree(adev->gfx.rlc.register_list_format); in gfx_v12_0_free_microcode()
569 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, in gfx_v12_0_init_microcode()
577 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, in gfx_v12_0_init_microcode()
586 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, in gfx_v12_0_init_microcode()
591 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; in gfx_v12_0_init_microcode()
599 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, in gfx_v12_0_init_microcode()
611 /* only one MEC for gfx 12 */ in gfx_v12_0_init_microcode()
612 adev->gfx.mec2_fw = NULL; in gfx_v12_0_init_microcode()
614 if (adev->gfx.imu.funcs) { in gfx_v12_0_init_microcode()
615 if (adev->gfx.imu.funcs->init_microcode) { in gfx_v12_0_init_microcode()
616 err = adev->gfx.imu.funcs->init_microcode(adev); in gfx_v12_0_init_microcode()
624 amdgpu_ucode_release(&adev->gfx.pfp_fw); in gfx_v12_0_init_microcode()
625 amdgpu_ucode_release(&adev->gfx.me_fw); in gfx_v12_0_init_microcode()
626 amdgpu_ucode_release(&adev->gfx.rlc_fw); in gfx_v12_0_init_microcode()
627 amdgpu_ucode_release(&adev->gfx.mec_fw); in gfx_v12_0_init_microcode()
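
The gfx_v12_0_init_microcode()/gfx_v12_0_free_microcode() hits above show the driver's request-then-unwind firmware handling: every amdgpu_ucode_request() has a matching amdgpu_ucode_release(), and the error path releases all four blobs unconditionally, relying on release being a no-op for firmware that was never requested. A minimal, self-contained sketch of that unwind idiom follows; load_blob()/release_blob() are hypothetical stand-ins, not amdgpu APIs.

/* Illustrative only: load_blob()/release_blob() are stand-ins, not amdgpu APIs. */
#include <stdlib.h>

struct blob { void *data; };

static int load_blob(struct blob **out, const char *name)
{
	(void)name;			/* a real loader would fetch the blob by name */
	*out = calloc(1, sizeof(**out));
	return *out ? 0 : -1;
}

static void release_blob(struct blob **b)
{
	if (!*b)
		return;			/* no-op if this blob was never loaded */
	free(*b);
	*b = NULL;
}

int init_microcode(struct blob **pfp, struct blob **me, struct blob **rlc)
{
	int err;

	*pfp = *me = *rlc = NULL;	/* the driver's adev->gfx fields start zeroed */

	err = load_blob(pfp, "pfp");
	if (err)
		goto out;
	err = load_blob(me, "me");
	if (err)
		goto out;
	err = load_blob(rlc, "rlc");
	if (err)
		goto out;
	return 0;

out:
	/* releases only what was actually loaded; the rest are still NULL */
	release_blob(pfp);
	release_blob(me);
	release_blob(rlc);
	return err;
}

Keeping a single out: label is why the release calls also appear grouped together in the free path above: one cleanup sequence serves both teardown and every failure point.
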
659 if (adev->gfx.rlc.cs_data == NULL) in gfx_v12_0_get_csb_buffer()
666 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { in gfx_v12_0_get_csb_buffer()
686 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, in gfx_v12_0_rlc_fini()
687 &adev->gfx.rlc.clear_state_gpu_addr, in gfx_v12_0_rlc_fini()
688 (void **)&adev->gfx.rlc.cs_ptr); in gfx_v12_0_rlc_fini()
691 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, in gfx_v12_0_rlc_fini()
692 &adev->gfx.rlc.cp_table_gpu_addr, in gfx_v12_0_rlc_fini()
693 (void **)&adev->gfx.rlc.cp_table_ptr); in gfx_v12_0_rlc_fini()
700 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; in gfx_v12_0_init_rlcg_reg_access_ctrl()
708 adev->gfx.rlc.rlcg_reg_access_supported = true; in gfx_v12_0_init_rlcg_reg_access_ctrl()
716 adev->gfx.rlc.cs_data = gfx12_cs_data; in gfx_v12_0_rlc_init()
718 cs_data = adev->gfx.rlc.cs_data; in gfx_v12_0_rlc_init()
728 if (adev->gfx.rlc.funcs->update_spm_vmid) in gfx_v12_0_rlc_init()
729 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); in gfx_v12_0_rlc_init()
736 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); in gfx_v12_0_mec_fini()
737 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); in gfx_v12_0_mec_fini()
738 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); in gfx_v12_0_mec_fini()
743 bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); in gfx_v12_0_me_init()
754 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); in gfx_v12_0_mec_init()
758 mec_hpd_size = adev->gfx.num_compute_rings * GFX12_MEC_HPD_SIZE; in gfx_v12_0_mec_init()
763 &adev->gfx.mec.hpd_eop_obj, in gfx_v12_0_mec_init()
764 &adev->gfx.mec.hpd_eop_gpu_addr, in gfx_v12_0_mec_init()
774 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); in gfx_v12_0_mec_init()
775 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); in gfx_v12_0_mec_init()
884 adev->gfx.config.max_hw_contexts = 8; in gfx_v12_0_gpu_early_init()
885 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; in gfx_v12_0_gpu_early_init()
886 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; in gfx_v12_0_gpu_early_init()
887 adev->gfx.config.sc_hiz_tile_fifo_size = 0; in gfx_v12_0_gpu_early_init()
888 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; in gfx_v12_0_gpu_early_init()
905 ring = &adev->gfx.gfx_ring[ring_id]; in gfx_v12_0_gfx_ring_init()
922 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, in gfx_v12_0_gfx_ring_init()
937 ring = &adev->gfx.compute_ring[ring_id]; in gfx_v12_0_compute_ring_init()
947 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr in gfx_v12_0_compute_ring_init()
953 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) in gfx_v12_0_compute_ring_init()
958 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, in gfx_v12_0_compute_ring_init()
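
The gfx_v12_0_mec_init() and gfx_v12_0_compute_ring_init() hits suggest one EOP buffer sized num_compute_rings * GFX12_MEC_HPD_SIZE, with each compute ring's eop_gpu_addr offset into that single allocation. The exact offset expression is only partly visible in these hits, so the sketch below shows just the general slicing arithmetic with placeholder values; GFX12_MEC_HPD_SIZE = 0x1000 is assumed for illustration, not the driver's definition.

#include <stdint.h>
#include <stdio.h>

#define GFX12_MEC_HPD_SIZE 0x1000u	/* assumed slot size, illustration only */

int main(void)
{
	uint64_t hpd_eop_gpu_addr = 0x800000000ull;	/* placeholder base address */
	unsigned int num_compute_rings = 8;
	uint64_t total = (uint64_t)num_compute_rings * GFX12_MEC_HPD_SIZE;

	printf("one allocation of 0x%llx bytes\n", (unsigned long long)total);
	for (unsigned int ring_id = 0; ring_id < num_compute_rings; ring_id++) {
		/* each ring gets a fixed-size slot carved out of the same buffer */
		uint64_t eop = hpd_eop_gpu_addr +
			       (uint64_t)ring_id * GFX12_MEC_HPD_SIZE;
		printf("ring %u: eop_gpu_addr = 0x%llx\n",
		       ring_id, (unsigned long long)eop);
	}
	return 0;
}
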
1022 &adev->gfx.rlc.rlc_autoload_bo, in gfx_v12_0_rlc_autoload_buffer_init()
1023 &adev->gfx.rlc.rlc_autoload_gpu_addr, in gfx_v12_0_rlc_autoload_buffer_init()
1024 (void **)&adev->gfx.rlc.rlc_autoload_ptr); in gfx_v12_0_rlc_autoload_buffer_init()
1041 char *ptr = adev->gfx.rlc.rlc_autoload_ptr; in gfx_v12_0_rlc_backdoor_autoload_copy_ucode()
1091 adev->gfx.pfp_fw->data; in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1093 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1099 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1108 adev->gfx.me_fw->data; in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1110 fw_data = (const __le32 *)(adev->gfx.me_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1116 fw_data = (const __le32 *)(adev->gfx.me_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1125 adev->gfx.mec_fw->data; in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1127 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1133 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1147 adev->gfx.rlc_fw->data; in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1148 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1158 rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1160 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1166 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1173 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1175 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1181 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode()
1255 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start; in gfx_v12_0_rlc_backdoor_autoload_enable()
1262 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { in gfx_v12_0_rlc_backdoor_autoload_enable()
1264 if (adev->gfx.imu.funcs->load_microcode) in gfx_v12_0_rlc_backdoor_autoload_enable()
1265 adev->gfx.imu.funcs->load_microcode(adev); in gfx_v12_0_rlc_backdoor_autoload_enable()
1267 if (adev->gfx.imu.funcs->setup_imu) in gfx_v12_0_rlc_backdoor_autoload_enable()
1268 adev->gfx.imu.funcs->setup_imu(adev); in gfx_v12_0_rlc_backdoor_autoload_enable()
1269 if (adev->gfx.imu.funcs->start_imu) in gfx_v12_0_rlc_backdoor_autoload_enable()
1270 adev->gfx.imu.funcs->start_imu(adev); in gfx_v12_0_rlc_backdoor_autoload_enable()
1294 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); in gfx_v12_0_alloc_ip_dump()
1295 adev->gfx.ip_dump_core = NULL; in gfx_v12_0_alloc_ip_dump()
1297 adev->gfx.ip_dump_core = ptr; in gfx_v12_0_alloc_ip_dump()
1302 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * in gfx_v12_0_alloc_ip_dump()
1303 adev->gfx.mec.num_queue_per_pipe; in gfx_v12_0_alloc_ip_dump()
1308 adev->gfx.ip_dump_compute_queues = NULL; in gfx_v12_0_alloc_ip_dump()
1310 adev->gfx.ip_dump_compute_queues = ptr; in gfx_v12_0_alloc_ip_dump()
1313 /* Allocate memory for gfx queue registers for all the instances */ in gfx_v12_0_alloc_ip_dump()
1315 inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me * in gfx_v12_0_alloc_ip_dump()
1316 adev->gfx.me.num_queue_per_pipe; in gfx_v12_0_alloc_ip_dump()
1320 DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); in gfx_v12_0_alloc_ip_dump()
1321 adev->gfx.ip_dump_gfx_queues = NULL; in gfx_v12_0_alloc_ip_dump()
1323 adev->gfx.ip_dump_gfx_queues = ptr; in gfx_v12_0_alloc_ip_dump()
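
gfx_v12_0_alloc_ip_dump() sizes one flat array per register list: the instance count is num_mec * num_pipe_per_mec * num_queue_per_pipe (and likewise me * pipe * queue for the gfx queues), multiplied by the number of registers in the list. The sketch below reproduces that sizing and the implied flat indexing with assumed topology and register-count values; it is not the driver's allocation code.

/* Illustrative sizing only; reg_count and the topology numbers are assumed. */
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	unsigned int num_mec = 1, pipes = 2, queues = 4;	/* assumed topology */
	unsigned int reg_count = 32;				/* assumed list size */
	unsigned int inst = num_mec * pipes * queues;
	uint32_t *dump;

	dump = calloc((size_t)reg_count * inst, sizeof(*dump));
	if (!dump)
		return 1;	/* the driver logs an error and keeps the pointer NULL */

	/* entry for (mec m, pipe p, queue q), register r:
	 *   dump[((m * pipes + p) * queues + q) * reg_count + r]
	 */

	free(dump);
	return 0;
}
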
1337 adev->gfx.me.num_me = 1; in gfx_v12_0_sw_init()
1338 adev->gfx.me.num_pipe_per_me = 1; in gfx_v12_0_sw_init()
1339 adev->gfx.me.num_queue_per_pipe = 1; in gfx_v12_0_sw_init()
1340 adev->gfx.mec.num_mec = 1; in gfx_v12_0_sw_init()
1341 adev->gfx.mec.num_pipe_per_mec = 2; in gfx_v12_0_sw_init()
1342 adev->gfx.mec.num_queue_per_pipe = 4; in gfx_v12_0_sw_init()
1345 adev->gfx.me.num_me = 1; in gfx_v12_0_sw_init()
1346 adev->gfx.me.num_pipe_per_me = 1; in gfx_v12_0_sw_init()
1347 adev->gfx.me.num_queue_per_pipe = 1; in gfx_v12_0_sw_init()
1348 adev->gfx.mec.num_mec = 1; in gfx_v12_0_sw_init()
1349 adev->gfx.mec.num_pipe_per_mec = 4; in gfx_v12_0_sw_init()
1350 adev->gfx.mec.num_queue_per_pipe = 8; in gfx_v12_0_sw_init()
1357 if (adev->gfx.me_fw_version >= 2480 && in gfx_v12_0_sw_init()
1358 adev->gfx.pfp_fw_version >= 2530 && in gfx_v12_0_sw_init()
1359 adev->gfx.mec_fw_version >= 2680 && in gfx_v12_0_sw_init()
1361 adev->gfx.enable_cleaner_shader = true; in gfx_v12_0_sw_init()
1364 adev->gfx.enable_cleaner_shader = false; in gfx_v12_0_sw_init()
1369 num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * in gfx_v12_0_sw_init()
1370 adev->gfx.mec.num_queue_per_pipe) / 2; in gfx_v12_0_sw_init()
1371 adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings, in gfx_v12_0_sw_init()
1377 &adev->gfx.eop_irq); in gfx_v12_0_sw_init()
1384 &adev->gfx.bad_op_irq); in gfx_v12_0_sw_init()
1391 &adev->gfx.priv_reg_irq); in gfx_v12_0_sw_init()
1398 &adev->gfx.priv_inst_irq); in gfx_v12_0_sw_init()
1402 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; in gfx_v12_0_sw_init()
1418 /* set up the gfx ring */ in gfx_v12_0_sw_init()
1419 for (i = 0; i < adev->gfx.me.num_me; i++) { in gfx_v12_0_sw_init()
1420 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { in gfx_v12_0_sw_init()
1421 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { in gfx_v12_0_sw_init()
1436 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { in gfx_v12_0_sw_init()
1437 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { in gfx_v12_0_sw_init()
1438 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { in gfx_v12_0_sw_init()
1453 adev->gfx.gfx_supported_reset = in gfx_v12_0_sw_init()
1454 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); in gfx_v12_0_sw_init()
1455 adev->gfx.compute_supported_reset = in gfx_v12_0_sw_init()
1456 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); in gfx_v12_0_sw_init()
1460 if ((adev->gfx.me_fw_version >= 2660) && in gfx_v12_0_sw_init()
1461 (adev->gfx.mec_fw_version >= 2920)) { in gfx_v12_0_sw_init()
1462 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; in gfx_v12_0_sw_init()
1463 adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; in gfx_v12_0_sw_init()
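
gfx_v12_0_sw_init() gates optional features on microcode versions: the cleaner shader needs me >= 2480, pfp >= 2530 and mec >= 2680 (plus one further condition not shown in these hits), and per-queue reset support is advertised only when me >= 2660 and mec >= 2920. A small stand-alone sketch of that version gating, with illustrative names rather than the driver's fields:

/* Illustrative only: reproduces just the thresholds visible in the hits above. */
#include <stdbool.h>
#include <stdio.h>

struct fw_versions {
	unsigned int me, pfp, mec;
};

static bool per_queue_reset_supported(const struct fw_versions *v)
{
	return v->me >= 2660 && v->mec >= 2920;
}

static bool cleaner_shader_supported(const struct fw_versions *v)
{
	return v->me >= 2480 && v->pfp >= 2530 && v->mec >= 2680;
}

int main(void)
{
	struct fw_versions v = { .me = 2700, .pfp = 2600, .mec = 2950 };

	printf("cleaner shader: %d, per-queue reset: %d\n",
	       cleaner_shader_supported(&v), per_queue_reset_supported(&v));
	return 0;
}
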
1505 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, in gfx_v12_0_pfp_fini()
1506 &adev->gfx.pfp.pfp_fw_gpu_addr, in gfx_v12_0_pfp_fini()
1507 (void **)&adev->gfx.pfp.pfp_fw_ptr); in gfx_v12_0_pfp_fini()
1509 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, in gfx_v12_0_pfp_fini()
1510 &adev->gfx.pfp.pfp_fw_data_gpu_addr, in gfx_v12_0_pfp_fini()
1511 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); in gfx_v12_0_pfp_fini()
1516 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, in gfx_v12_0_me_fini()
1517 &adev->gfx.me.me_fw_gpu_addr, in gfx_v12_0_me_fini()
1518 (void **)&adev->gfx.me.me_fw_ptr); in gfx_v12_0_me_fini()
1520 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, in gfx_v12_0_me_fini()
1521 &adev->gfx.me.me_fw_data_gpu_addr, in gfx_v12_0_me_fini()
1522 (void **)&adev->gfx.me.me_fw_data_ptr); in gfx_v12_0_me_fini()
1527 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, in gfx_v12_0_rlc_autoload_buffer_fini()
1528 &adev->gfx.rlc.rlc_autoload_gpu_addr, in gfx_v12_0_rlc_autoload_buffer_fini()
1529 (void **)&adev->gfx.rlc.rlc_autoload_ptr); in gfx_v12_0_rlc_autoload_buffer_fini()
1537 for (i = 0; i < adev->gfx.num_gfx_rings; i++) in gfx_v12_0_sw_fini()
1538 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); in gfx_v12_0_sw_fini()
1539 for (i = 0; i < adev->gfx.num_compute_rings; i++) in gfx_v12_0_sw_fini()
1540 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); in gfx_v12_0_sw_fini()
1545 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); in gfx_v12_0_sw_fini()
1561 kfree(adev->gfx.ip_dump_core); in gfx_v12_0_sw_fini()
1562 kfree(adev->gfx.ip_dump_compute_queues); in gfx_v12_0_sw_fini()
1563 kfree(adev->gfx.ip_dump_gfx_queues); in gfx_v12_0_sw_fini()
1607 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * in gfx_v12_0_get_sa_active_bitmap()
1608 adev->gfx.config.max_shader_engines); in gfx_v12_0_get_sa_active_bitmap()
1626 rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se * in gfx_v12_0_get_rb_active_bitmap()
1627 adev->gfx.config.max_shader_engines); in gfx_v12_0_get_rb_active_bitmap()
1648 max_sa = adev->gfx.config.max_shader_engines * in gfx_v12_0_setup_rb()
1649 adev->gfx.config.max_sh_per_se; in gfx_v12_0_setup_rb()
1650 rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se / in gfx_v12_0_setup_rb()
1651 adev->gfx.config.max_sh_per_se; in gfx_v12_0_setup_rb()
1660 adev->gfx.config.backend_enable_mask = active_rb_bitmap; in gfx_v12_0_setup_rb()
1661 adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); in gfx_v12_0_setup_rb()
1716 gfx_v12_0_get_cu_info(adev, &adev->gfx.cu_info); in gfx_v12_0_constants_init()
1718 adev->gfx.config.pa_sc_tile_steering_override = 0; in gfx_v12_0_constants_init()
1786 for (i = 0; i < adev->gfx.me.num_me; i++) { in gfx_v12_0_enable_gui_idle_interrupt()
1787 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { in gfx_v12_0_enable_gui_idle_interrupt()
1808 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); in gfx_v12_0_init_csb()
1811 adev->gfx.rlc.clear_state_gpu_addr >> 32); in gfx_v12_0_init_csb()
1813 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); in gfx_v12_0_init_csb()
1814 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); in gfx_v12_0_init_csb()
1885 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; in gfx_v12_0_load_rlcg_microcode()
1886 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_load_rlcg_microcode()
1897 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); in gfx_v12_0_load_rlcg_microcode()
1907 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; in gfx_v12_0_load_rlc_iram_dram_microcode()
1909 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_load_rlc_iram_dram_microcode()
1922 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); in gfx_v12_0_load_rlc_iram_dram_microcode()
1924 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + in gfx_v12_0_load_rlc_iram_dram_microcode()
1936 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); in gfx_v12_0_load_rlc_iram_dram_microcode()
1950 if (!adev->gfx.rlc_fw) in gfx_v12_0_rlc_load_microcode()
1953 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; in gfx_v12_0_rlc_load_microcode()
1987 adev->gfx.rlc.funcs->stop(adev); in gfx_v12_0_rlc_resume()
2004 adev->gfx.rlc.funcs->start(adev); in gfx_v12_0_rlc_resume()
2018 adev->gfx.mec_fw->data; in gfx_v12_0_config_gfx_rs64()
2020 adev->gfx.me_fw->data; in gfx_v12_0_config_gfx_rs64()
2022 adev->gfx.pfp_fw->data; in gfx_v12_0_config_gfx_rs64()
2101 adev->gfx.pfp_fw->data; in gfx_v12_0_set_pfp_ucode_start_addr()
2103 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { in gfx_v12_0_set_pfp_ucode_start_addr()
2143 adev->gfx.me_fw->data; in gfx_v12_0_set_me_ucode_start_addr()
2145 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { in gfx_v12_0_set_me_ucode_start_addr()
2185 adev->gfx.mec_fw->data; in gfx_v12_0_set_mec_ucode_start_addr()
2187 for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) { in gfx_v12_0_set_mec_ucode_start_addr()
2249 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); in gfx_v12_0_cp_gfx_enable()
2264 adev->gfx.pfp_fw->data; in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2269 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2273 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2280 &adev->gfx.pfp.pfp_fw_obj, in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2281 &adev->gfx.pfp.pfp_fw_gpu_addr, in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2282 (void **)&adev->gfx.pfp.pfp_fw_ptr); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2291 &adev->gfx.pfp.pfp_fw_data_obj, in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2292 &adev->gfx.pfp.pfp_fw_data_gpu_addr, in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2293 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2300 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2301 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2303 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2304 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2305 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2306 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2312 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2314 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2359 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2363 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2365 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); in gfx_v12_0_cp_gfx_load_pfp_microcode_rs64()
2408 adev->gfx.me_fw->data; in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2413 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2417 fw_data = (const __le32 *)(adev->gfx.me_fw->data + in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2424 &adev->gfx.me.me_fw_obj, in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2425 &adev->gfx.me.me_fw_gpu_addr, in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2426 (void **)&adev->gfx.me.me_fw_ptr); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2435 &adev->gfx.me.me_fw_data_obj, in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2436 &adev->gfx.me.me_fw_data_gpu_addr, in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2437 (void **)&adev->gfx.me.me_fw_data_ptr); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2444 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2445 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2447 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2448 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2449 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2450 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2456 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2458 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2504 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2508 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2510 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); in gfx_v12_0_cp_gfx_load_me_microcode_rs64()
2547 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) in gfx_v12_0_cp_gfx_load_microcode()
2571 adev->gfx.config.max_hw_contexts - 1); in gfx_v12_0_cp_gfx_start()
2630 /* Init gfx ring 0 for pipe 0 */ in gfx_v12_0_cp_gfx_resume()
2635 ring = &adev->gfx.gfx_ring[0]; in gfx_v12_0_cp_gfx_resume()
2678 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in gfx_v12_0_cp_gfx_resume()
2679 ring = &adev->gfx.gfx_ring[i]; in gfx_v12_0_cp_gfx_resume()
2713 adev->gfx.kiq[0].ring.sched.ready = enable; in gfx_v12_0_cp_compute_enable()
2727 if (!adev->gfx.mec_fw) in gfx_v12_0_cp_compute_load_microcode_rs64()
2732 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; in gfx_v12_0_cp_compute_load_microcode_rs64()
2735 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + in gfx_v12_0_cp_compute_load_microcode_rs64()
2739 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + in gfx_v12_0_cp_compute_load_microcode_rs64()
2745 &adev->gfx.mec.mec_fw_obj, in gfx_v12_0_cp_compute_load_microcode_rs64()
2746 &adev->gfx.mec.mec_fw_gpu_addr, in gfx_v12_0_cp_compute_load_microcode_rs64()
2756 adev->gfx.mec.num_pipe_per_mec, in gfx_v12_0_cp_compute_load_microcode_rs64()
2758 &adev->gfx.mec.mec_fw_data_obj, in gfx_v12_0_cp_compute_load_microcode_rs64()
2759 &adev->gfx.mec.mec_fw_data_gpu_addr, in gfx_v12_0_cp_compute_load_microcode_rs64()
2768 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { in gfx_v12_0_cp_compute_load_microcode_rs64()
2772 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); in gfx_v12_0_cp_compute_load_microcode_rs64()
2773 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); in gfx_v12_0_cp_compute_load_microcode_rs64()
2774 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); in gfx_v12_0_cp_compute_load_microcode_rs64()
2775 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); in gfx_v12_0_cp_compute_load_microcode_rs64()
2789 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { in gfx_v12_0_cp_compute_load_microcode_rs64()
2793 lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + in gfx_v12_0_cp_compute_load_microcode_rs64()
2796 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + in gfx_v12_0_cp_compute_load_microcode_rs64()
2800 lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); in gfx_v12_0_cp_compute_load_microcode_rs64()
2802 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); in gfx_v12_0_cp_compute_load_microcode_rs64()
2885 /* set up gfx hqd wptr */ in gfx_v12_0_gfx_mqd_init()
2916 /* set up gfx hqd base. this is similar as CP_RB_BASE */ in gfx_v12_0_gfx_mqd_init()
2967 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; in gfx_v12_0_kgq_init_queue()
2976 if (adev->gfx.me.mqd_backup[mqd_idx]) in gfx_v12_0_kgq_init_queue()
2977 memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); in gfx_v12_0_kgq_init_queue()
2980 if (adev->gfx.me.mqd_backup[mqd_idx]) in gfx_v12_0_kgq_init_queue()
2981 memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); in gfx_v12_0_kgq_init_queue()
2996 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in gfx_v12_0_cp_async_gfx_ring_resume()
2997 ring = &adev->gfx.gfx_ring[i]; in gfx_v12_0_cp_async_gfx_ring_resume()
3022 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in gfx_v12_0_cp_async_gfx_ring_resume()
3023 ring = &adev->gfx.gfx_ring[i]; in gfx_v12_0_cp_async_gfx_ring_resume()
3279 if (adev->gfx.mec.mqd_backup[mqd_idx]) in gfx_v12_0_kiq_init_queue()
3280 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); in gfx_v12_0_kiq_init_queue()
3302 if (adev->gfx.mec.mqd_backup[mqd_idx]) in gfx_v12_0_kiq_init_queue()
3303 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); in gfx_v12_0_kiq_init_queue()
3313 int mqd_idx = ring - &adev->gfx.compute_ring[0]; in gfx_v12_0_kcq_init_queue()
3323 if (adev->gfx.mec.mqd_backup[mqd_idx]) in gfx_v12_0_kcq_init_queue()
3324 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); in gfx_v12_0_kcq_init_queue()
3327 if (adev->gfx.mec.mqd_backup[mqd_idx]) in gfx_v12_0_kcq_init_queue()
3328 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); in gfx_v12_0_kcq_init_queue()
3343 ring = &adev->gfx.kiq[0].ring; in gfx_v12_0_kiq_resume()
3371 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in gfx_v12_0_kcq_resume()
3372 ring = &adev->gfx.compute_ring[i]; in gfx_v12_0_kcq_resume()
3440 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in gfx_v12_0_cp_resume()
3441 ring = &adev->gfx.gfx_ring[i]; in gfx_v12_0_cp_resume()
3447 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in gfx_v12_0_cp_resume()
3448 ring = &adev->gfx.compute_ring[i]; in gfx_v12_0_cp_resume()
3493 adev->gfx.config.gb_addr_config_fields.num_pkrs = in get_gb_addr_config()
3496 adev->gfx.config.gb_addr_config = gb_addr_config; in get_gb_addr_config()
3498 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << in get_gb_addr_config()
3499 REG_GET_FIELD(adev->gfx.config.gb_addr_config, in get_gb_addr_config()
3502 adev->gfx.config.max_tile_pipes = in get_gb_addr_config()
3503 adev->gfx.config.gb_addr_config_fields.num_pipes; in get_gb_addr_config()
3505 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << in get_gb_addr_config()
3506 REG_GET_FIELD(adev->gfx.config.gb_addr_config, in get_gb_addr_config()
3508 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << in get_gb_addr_config()
3509 REG_GET_FIELD(adev->gfx.config.gb_addr_config, in get_gb_addr_config()
3511 adev->gfx.config.gb_addr_config_fields.num_se = 1 << in get_gb_addr_config()
3512 REG_GET_FIELD(adev->gfx.config.gb_addr_config, in get_gb_addr_config()
3514 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + in get_gb_addr_config()
3515 REG_GET_FIELD(adev->gfx.config.gb_addr_config, in get_gb_addr_config()
3562 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { in gfx_v12_0_hw_init()
3564 if (adev->gfx.imu.funcs->program_rlc_ram) in gfx_v12_0_hw_init()
3565 adev->gfx.imu.funcs->program_rlc_ram(adev); in gfx_v12_0_hw_init()
3573 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { in gfx_v12_0_hw_init()
3574 if (adev->gfx.imu.funcs->load_microcode) in gfx_v12_0_hw_init()
3575 adev->gfx.imu.funcs->load_microcode(adev); in gfx_v12_0_hw_init()
3576 if (adev->gfx.imu.funcs->setup_imu) in gfx_v12_0_hw_init()
3577 adev->gfx.imu.funcs->setup_imu(adev); in gfx_v12_0_hw_init()
3578 if (adev->gfx.imu.funcs->start_imu) in gfx_v12_0_hw_init()
3579 adev->gfx.imu.funcs->start_imu(adev); in gfx_v12_0_hw_init()
3599 adev->gfx.is_poweron = true; in gfx_v12_0_hw_init()
3615 * For gfx 12, rlc firmware loading relies on smu firmware is in gfx_v12_0_hw_init()
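
The hw_init hits show the optional-callback pattern used for the IMU: adev->gfx.imu.funcs and each individual hook are NULL-checked before every call (the same pattern appears in gfx_v12_0_rlc_backdoor_autoload_enable() above). A generic sketch of that pattern follows, with made-up ops names rather than the driver's imu funcs table.

/* Illustrative only: imu_like_funcs is a stand-in for the driver's ops table. */
#include <stdio.h>

struct imu_like_funcs {
	void (*load_microcode)(void *ctx);
	void (*setup)(void *ctx);
	void (*start)(void *ctx);
};

static void start_hw(void *ctx, const struct imu_like_funcs *funcs)
{
	if (!funcs)
		return;			/* no IMU block on this configuration */
	if (funcs->load_microcode)
		funcs->load_microcode(ctx);
	if (funcs->setup)
		funcs->setup(ctx);
	if (funcs->start)
		funcs->start(ctx);
}

static void demo_start(void *ctx) { (void)ctx; printf("start\n"); }

int main(void)
{
	struct imu_like_funcs funcs = { .start = demo_start };	/* other hooks unset */

	start_hw(NULL, &funcs);		/* only the populated hook runs */
	return 0;
}
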
3651 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); in gfx_v12_0_hw_fini()
3652 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); in gfx_v12_0_hw_fini()
3653 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); in gfx_v12_0_hw_fini()
3681 adev->gfx.is_poweron = false; in gfx_v12_0_hw_fini()
3742 adev->gfx.funcs = &gfx_v12_0_gfx_funcs; in gfx_v12_0_early_init()
3744 adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS; in gfx_v12_0_early_init()
3745 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), in gfx_v12_0_early_init()
3765 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); in gfx_v12_0_late_init()
3769 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); in gfx_v12_0_late_init()
3773 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); in gfx_v12_0_late_init()
4507 /* set load_per_context_state & load_gfx_sh_regs for GFX */ in gfx_v12_0_ring_emit_cntxcntl()
4537 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in gfx_v12_0_ring_preempt_ib()
4820 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); in gfx_v12_0_eop_irq()
4822 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); in gfx_v12_0_eop_irq()
4826 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in gfx_v12_0_eop_irq()
4827 ring = &adev->gfx.compute_ring[i]; in gfx_v12_0_eop_irq()
4855 for (i = 0; i < adev->gfx.me.num_me; i++) { in gfx_v12_0_set_priv_reg_fault_state()
4856 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { in gfx_v12_0_set_priv_reg_fault_state()
4868 for (i = 0; i < adev->gfx.mec.num_mec; i++) { in gfx_v12_0_set_priv_reg_fault_state()
4869 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { in gfx_v12_0_set_priv_reg_fault_state()
4901 for (i = 0; i < adev->gfx.me.num_me; i++) { in gfx_v12_0_set_bad_op_fault_state()
4902 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { in gfx_v12_0_set_bad_op_fault_state()
4914 for (i = 0; i < adev->gfx.mec.num_mec; i++) { in gfx_v12_0_set_bad_op_fault_state()
4915 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { in gfx_v12_0_set_bad_op_fault_state()
4946 for (i = 0; i < adev->gfx.me.num_me; i++) { in gfx_v12_0_set_priv_inst_fault_state()
4947 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { in gfx_v12_0_set_priv_inst_fault_state()
4980 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in gfx_v12_0_handle_priv_fault()
4981 ring = &adev->gfx.gfx_ring[i]; in gfx_v12_0_handle_priv_fault()
4989 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in gfx_v12_0_handle_priv_fault()
4990 ring = &adev->gfx.compute_ring[i]; in gfx_v12_0_handle_priv_fault()
5080 if (!adev->gfx.ip_dump_core) in gfx_v12_ip_print()
5086 adev->gfx.ip_dump_core[i]); in gfx_v12_ip_print()
5089 if (!adev->gfx.ip_dump_compute_queues) in gfx_v12_ip_print()
5094 adev->gfx.mec.num_mec, in gfx_v12_ip_print()
5095 adev->gfx.mec.num_pipe_per_mec, in gfx_v12_ip_print()
5096 adev->gfx.mec.num_queue_per_pipe); in gfx_v12_ip_print()
5098 for (i = 0; i < adev->gfx.mec.num_mec; i++) { in gfx_v12_ip_print()
5099 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { in gfx_v12_ip_print()
5100 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { in gfx_v12_ip_print()
5105 adev->gfx.ip_dump_compute_queues[index + reg]); in gfx_v12_ip_print()
5112 /* print gfx queue registers for all instances */ in gfx_v12_ip_print()
5113 if (!adev->gfx.ip_dump_gfx_queues) in gfx_v12_ip_print()
5119 adev->gfx.me.num_me, in gfx_v12_ip_print()
5120 adev->gfx.me.num_pipe_per_me, in gfx_v12_ip_print()
5121 adev->gfx.me.num_queue_per_pipe); in gfx_v12_ip_print()
5123 for (i = 0; i < adev->gfx.me.num_me; i++) { in gfx_v12_ip_print()
5124 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { in gfx_v12_ip_print()
5125 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { in gfx_v12_ip_print()
5130 adev->gfx.ip_dump_gfx_queues[index + reg]); in gfx_v12_ip_print()
5144 if (!adev->gfx.ip_dump_core) in gfx_v12_ip_dump()
5149 adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i])); in gfx_v12_ip_dump()
5153 if (!adev->gfx.ip_dump_compute_queues) in gfx_v12_ip_dump()
5159 for (i = 0; i < adev->gfx.mec.num_mec; i++) { in gfx_v12_ip_dump()
5160 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { in gfx_v12_ip_dump()
5161 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { in gfx_v12_ip_dump()
5162 /* ME0 is for GFX so start from 1 for CP */ in gfx_v12_ip_dump()
5163 soc24_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); in gfx_v12_ip_dump()
5165 adev->gfx.ip_dump_compute_queues[index + reg] = in gfx_v12_ip_dump()
5177 /* dump gfx queue registers for all instances */ in gfx_v12_ip_dump()
5178 if (!adev->gfx.ip_dump_gfx_queues) in gfx_v12_ip_dump()
5185 for (i = 0; i < adev->gfx.me.num_me; i++) { in gfx_v12_ip_dump()
5186 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { in gfx_v12_ip_dump()
5187 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { in gfx_v12_ip_dump()
5191 adev->gfx.ip_dump_gfx_queues[index + reg] = in gfx_v12_ip_dump()
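
gfx_v12_ip_dump() walks every mec/pipe/queue instance (and then every me/pipe/queue instance for the gfx queues), selects it via soc24_grbm_select(), offsets the compute ME index by adev->gfx.me.num_me because ME0 is the gfx ME, reads each register in the list, and advances a running index by the register count. The sketch below shows only that flat-index traversal; select_instance()/read_reg() are hypothetical stand-ins for the grbm select and register reads.

/* Illustrative only: REG_COUNT, select_instance() and read_reg() are stand-ins. */
#include <stdint.h>

#define REG_COUNT 32u			/* assumed register-list length */

static void select_instance(unsigned int me, unsigned int pipe, unsigned int queue)
{
	(void)me; (void)pipe; (void)queue;
}

static uint32_t read_reg(unsigned int reg)
{
	return reg;
}

void dump_compute_queues(uint32_t *dump, unsigned int num_mec,
			 unsigned int pipes, unsigned int queues,
			 unsigned int num_gfx_me)
{
	unsigned int index = 0;

	for (unsigned int i = 0; i < num_mec; i++) {
		for (unsigned int j = 0; j < pipes; j++) {
			for (unsigned int k = 0; k < queues; k++) {
				/* compute MEs follow the gfx ME(s), hence the offset */
				select_instance(num_gfx_me + i, j, k);
				for (unsigned int reg = 0; reg < REG_COUNT; reg++)
					dump[index + reg] = read_reg(reg);
				index += REG_COUNT;
			}
		}
	}
	select_instance(0, 0, 0);	/* restore the default selection */
}
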
5424 adev->gfx.kiq[0].ring.funcs = &gfx_v12_0_ring_funcs_kiq; in gfx_v12_0_set_ring_funcs()
5426 for (i = 0; i < adev->gfx.num_gfx_rings; i++) in gfx_v12_0_set_ring_funcs()
5427 adev->gfx.gfx_ring[i].funcs = &gfx_v12_0_ring_funcs_gfx; in gfx_v12_0_set_ring_funcs()
5429 for (i = 0; i < adev->gfx.num_compute_rings; i++) in gfx_v12_0_set_ring_funcs()
5430 adev->gfx.compute_ring[i].funcs = &gfx_v12_0_ring_funcs_compute; in gfx_v12_0_set_ring_funcs()
5455 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; in gfx_v12_0_set_irq_funcs()
5456 adev->gfx.eop_irq.funcs = &gfx_v12_0_eop_irq_funcs; in gfx_v12_0_set_irq_funcs()
5458 adev->gfx.priv_reg_irq.num_types = 1; in gfx_v12_0_set_irq_funcs()
5459 adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs; in gfx_v12_0_set_irq_funcs()
5461 adev->gfx.bad_op_irq.num_types = 1; in gfx_v12_0_set_irq_funcs()
5462 adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs; in gfx_v12_0_set_irq_funcs()
5464 adev->gfx.priv_inst_irq.num_types = 1; in gfx_v12_0_set_irq_funcs()
5465 adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs; in gfx_v12_0_set_irq_funcs()
5471 adev->gfx.imu.mode = MISSION_MODE; in gfx_v12_0_set_imu_funcs()
5473 adev->gfx.imu.mode = DEBUG_MODE; in gfx_v12_0_set_imu_funcs()
5475 adev->gfx.imu.funcs = &gfx_v12_0_imu_funcs; in gfx_v12_0_set_imu_funcs()
5480 adev->gfx.rlc.funcs = &gfx_v12_0_rlc_funcs; in gfx_v12_0_set_rlc_funcs()
5485 /* set gfx eng mqd */ in gfx_v12_0_set_mqd_funcs()
5521 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); in gfx_v12_0_get_wgp_active_bitmap_per_sh()
5557 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { in gfx_v12_0_get_cu_info()
5558 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { in gfx_v12_0_get_cu_info()
5559 bitmap = i * adev->gfx.config.max_sh_per_se + j; in gfx_v12_0_get_cu_info()
5587 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { in gfx_v12_0_get_cu_info()