Lines matching: rpmh, rsc

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
11 #include <soc/qcom/cmd-db.h>
24 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fault()
25 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_fault()
28 gmu->hung = true; in a6xx_gmu_fault()
31 del_timer(&gpu->hangcheck_timer); in a6xx_gmu_fault()
34 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_gmu_fault()
46 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
52 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
55 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
70 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
83 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
99 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
113 const struct a6xx_info *info = adreno_gpu->info->a6xx; in a6xx_gmu_set_freq()
115 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq()
123 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
126 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
127 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
131 if (info->bcms && gmu->nr_gpu_bws > 1) { in a6xx_gmu_set_freq()
134 for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) { in a6xx_gmu_set_freq()
135 if (bw == gmu->gpu_bw_table[bw_index]) in a6xx_gmu_set_freq()
154 do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]); in a6xx_gmu_set_freq()
161 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
162 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
164 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
175 if (!gmu->legacy) { in a6xx_gmu_set_freq()
179 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
200 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
202 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
209 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq()
211 return gmu->freq; in a6xx_gmu_get_freq()
217 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
220 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
226 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
243 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_start()
273 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
288 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
344 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_set_oob()
347 return -EINVAL; in a6xx_gmu_set_oob()
349 if (gmu->legacy) { in a6xx_gmu_set_oob()
356 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
357 "Invalid non-legacy GMU request %s\n", in a6xx_gmu_set_oob()
359 return -EINVAL; in a6xx_gmu_set_oob()
371 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
387 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_clear_oob()
392 if (gmu->legacy) in a6xx_gmu_clear_oob()
406 if (!gmu->legacy) in a6xx_sptprac_enable()
415 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
428 if (!gmu->legacy) in a6xx_sptprac_disable()
440 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
453 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
465 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gemnoc_workaround()
485 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
488 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
503 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
504 ret = -ETIMEDOUT; in a6xx_gmu_notify_slumber()
526 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
534 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
553 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
569 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_init()
570 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
599 /* Setup RSC PDC handshake for sleep and wakeup */ in a6xx_gmu_rpmh_init()
617 /* Load RSC sequencer uCode for sleep and wakeup */ in a6xx_gmu_rpmh_init()
696 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
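As a quick sanity check on that figure (assuming, and this is an assumption here, that the hysteresis counter ticks at the 19.2 MHz XO/always-on rate used elsewhere by the GMU): 0xa = 10 cycles, and 10 / 19.2 MHz ≈ 0.52 µs, which rounds to the 0.5 µs quoted in the comment.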
705 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_power_config()
718 switch (gmu->idle_level) { in a6xx_gmu_power_config()
734 /* Enable RPMh GPU client */ in a6xx_gmu_power_config()
754 if (!in_range(blk->addr, bo->iova, bo->size)) in fw_block_mem()
757 memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); in fw_block_mem()
764 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_load()
765 const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; in a6xx_gmu_fw_load()
776 if (gmu->legacy) { in a6xx_gmu_fw_load()
778 if (fw_image->size > 0x8000) { in a6xx_gmu_fw_load()
779 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
781 return -EINVAL; in a6xx_gmu_fw_load()
785 (u32*) fw_image->data, fw_image->size); in a6xx_gmu_fw_load()
790 for (blk = (const struct block_header *) fw_image->data; in a6xx_gmu_fw_load()
791 (const u8*) blk < fw_image->data + fw_image->size; in a6xx_gmu_fw_load()
792 blk = (const struct block_header *) &blk->data[blk->size >> 2]) { in a6xx_gmu_fw_load()
793 if (blk->size == 0) in a6xx_gmu_fw_load()
796 if (in_range(blk->addr, itcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
797 reg_offset = (blk->addr - itcm_base) >> 2; in a6xx_gmu_fw_load()
800 blk->data, blk->size); in a6xx_gmu_fw_load()
801 } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
802 reg_offset = (blk->addr - dtcm_base) >> 2; in a6xx_gmu_fw_load()
805 blk->data, blk->size); in a6xx_gmu_fw_load()
806 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
807 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
808 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
809 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
811 blk->addr, blk->size, blk->data[0]); in a6xx_gmu_fw_load()
827 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_start()
828 const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx; in a6xx_gmu_fw_start()
850 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], in a6xx_gmu_fw_start()
852 return -ENOENT; in a6xx_gmu_fw_start()
868 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
890 if (a6xx_info->gmu_chipid) { in a6xx_gmu_fw_start()
891 chipid = a6xx_info->gmu_chipid; in a6xx_gmu_fw_start()
900 chipid = adreno_gpu->chip_id & 0xffff0000; in a6xx_gmu_fw_start()
901 chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */ in a6xx_gmu_fw_start()
902 chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */ in a6xx_gmu_fw_start()
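The three masked ORs above (in a6xx_gmu_fw_start()) repack the Adreno chip_id into the legacy GMU chipid layout used when the catalog supplies no gmu_chipid: the upper 16 bits are kept as-is and the minor and patch revisions are squeezed into one nibble each. A minimal stand-alone sketch of that repacking (not the kernel code; the sample chip_id value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Repack an Adreno chip_id into the legacy GMU chipid layout. */
static uint32_t legacy_gmu_chipid(uint32_t chip_id)
{
	uint32_t chipid;

	chipid  = chip_id & 0xffff0000;		/* core and major revision, unchanged */
	chipid |= (chip_id << 4) & 0xf000;	/* minor revision, one nibble */
	chipid |= (chip_id << 8) & 0x0f00;	/* patch revision, one nibble */

	return chipid;
}

int main(void)
{
	uint32_t chip_id = 0x06030001;	/* hypothetical chip_id, for illustration only */

	printf("GMU chipid = 0x%08x\n", legacy_gmu_chipid(chip_id));
	return 0;
}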
908 (gmu->log.iova & GENMASK(31, 12)) | in a6xx_gmu_fw_start()
909 ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); in a6xx_gmu_fw_start()
914 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
924 if (gmu->legacy) { in a6xx_gmu_fw_start()
931 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
957 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
958 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
967 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_off()
974 /* Make sure there are no outstanding RPMh votes */ in a6xx_gmu_rpmh_off()
989 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_force_off()
990 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_force_off()
996 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_gmu_force_off()
1009 /* Make sure there are no outstanding RPMh votes */ in a6xx_gmu_force_off()
1031 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
1033 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_freq()
1037 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
1045 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
1047 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_bw()
1051 dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); in a6xx_gmu_set_initial_bw()
1057 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_resume()
1058 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_resume()
1059 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume()
1062 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
1063 return -EINVAL; in a6xx_gmu_resume()
1065 gmu->hung = false; in a6xx_gmu_resume()
1068 if (!IS_ERR(gmu->qmp)) { in a6xx_gmu_resume()
1069 ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", in a6xx_gmu_resume()
1072 dev_err(gmu->dev, "failed to send GPU ACD state\n"); in a6xx_gmu_resume()
1076 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
1083 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
1084 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
1087 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
1088 clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? in a6xx_gmu_resume()
1090 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
1092 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1093 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1103 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1109 } else if (gmu->legacy) { in a6xx_gmu_resume()
1134 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
1142 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1144 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1145 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1155 if (!gmu->initialized) in a6xx_gmu_isidle()
1170 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_shutdown()
1179 if (adreno_gpu->base.needs_hw_init) { in a6xx_gmu_shutdown()
1180 if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET)) in a6xx_gmu_shutdown()
1183 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in a6xx_gmu_shutdown()
1192 a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); in a6xx_gmu_shutdown()
1210 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1223 /* Tell RPMh to power off the GPU */ in a6xx_gmu_shutdown()
1235 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop()
1236 struct msm_gpu *gpu = &a6xx_gpu->base.base; in a6xx_gmu_stop()
1238 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1245 if (gmu->hung) in a6xx_gmu_stop()
1251 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_gmu_stop()
1258 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1259 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1261 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1263 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1270 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); in a6xx_gmu_memory_free()
1271 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); in a6xx_gmu_memory_free()
1272 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1273 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1274 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); in a6xx_gmu_memory_free()
1275 msm_gem_kernel_put(gmu->log.obj, gmu->aspace); in a6xx_gmu_memory_free()
1277 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1278 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1285 struct drm_device *dev = a6xx_gpu->base.base.dev; in a6xx_gmu_memory_alloc()
1292 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1303 bo->obj = msm_gem_new(dev, size, flags); in a6xx_gmu_memory_alloc()
1304 if (IS_ERR(bo->obj)) in a6xx_gmu_memory_alloc()
1305 return PTR_ERR(bo->obj); in a6xx_gmu_memory_alloc()
1307 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1310 drm_gem_object_put(bo->obj); in a6xx_gmu_memory_alloc()
1314 bo->virt = msm_gem_get_vaddr(bo->obj); in a6xx_gmu_memory_alloc()
1315 bo->size = size; in a6xx_gmu_memory_alloc()
1317 msm_gem_object_set_name(bo->obj, "%s", name); in a6xx_gmu_memory_alloc()
1326 mmu = msm_iommu_new(gmu->dev, 0); in a6xx_gmu_memory_probe()
1328 return -ENODEV; in a6xx_gmu_memory_probe()
1332 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1333 if (IS_ERR(gmu->aspace)) in a6xx_gmu_memory_probe()
1334 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1340 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
1341 * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
1342 * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
1360 /* Retrieve BCM data from cmd-db */ in a6xx_gmu_rpmh_bw_votes_init()
1362 const struct a6xx_bcm *bcm = &info->bcms[bcm_index]; in a6xx_gmu_rpmh_bw_votes_init()
1366 if (!bcm->name) in a6xx_gmu_rpmh_bw_votes_init()
1369 bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count); in a6xx_gmu_rpmh_bw_votes_init()
1374 dev_err(gmu->dev, "invalid BCM '%s' aux data size\n", in a6xx_gmu_rpmh_bw_votes_init()
1375 bcm->name); in a6xx_gmu_rpmh_bw_votes_init()
1376 return -EINVAL; in a6xx_gmu_rpmh_bw_votes_init()
1383 for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) { in a6xx_gmu_rpmh_bw_votes_init()
1384 u32 *data = gmu->gpu_ib_votes[bw_index]; in a6xx_gmu_rpmh_bw_votes_init()
1385 u32 bw = gmu->gpu_bw_table[bw_index]; in a6xx_gmu_rpmh_bw_votes_init()
1389 const struct a6xx_bcm *bcm = &info->bcms[bcm_index]; in a6xx_gmu_rpmh_bw_votes_init()
1394 if (bcm_index == bcm_count - 1 || in a6xx_gmu_rpmh_bw_votes_init()
1396 bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd)) in a6xx_gmu_rpmh_bw_votes_init()
1404 if (bcm->fixed) { in a6xx_gmu_rpmh_bw_votes_init()
1409 (bcm->perfmode_bw && bw >= bcm->perfmode_bw)) in a6xx_gmu_rpmh_bw_votes_init()
1410 perfmode = bcm->perfmode; in a6xx_gmu_rpmh_bw_votes_init()
1417 peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width); in a6xx_gmu_rpmh_bw_votes_init()
1418 do_div(peak, bcm->buswidth); in a6xx_gmu_rpmh_bw_votes_init()
1422 do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit)); in a6xx_gmu_rpmh_bw_votes_init()
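Tying this back to the struct bcm_db comment above: width acts as the multiplier and unit as the divisor when a bytes/sec bandwidth is turned into an RPMh vote, and the three statements above apply exactly that scaling. A minimal stand-alone sketch of the same arithmetic (not the kernel code; the numbers are made up, the real values come from cmd-db and the GPU's BCM table):

#include <stdint.h>
#include <stdio.h>

/* Scale a bandwidth request (bytes/sec) into a BCM peak vote. */
static uint32_t bcm_peak_vote(uint64_t bw, uint16_t width,
			      uint16_t buswidth, uint32_t unit)
{
	uint64_t peak = bw * width;	/* multiply by the BCM width */

	peak /= buswidth;		/* normalize to the bus width */
	peak /= unit;			/* divide down to RPMh vote units */

	return (uint32_t)peak;
}

int main(void)
{
	/* hypothetical values: 8 GB/s request, width 4, buswidth 4, unit 1024 */
	printf("vote = %u\n", bcm_peak_vote(8000000000ULL, 4, 4, 1024));
	return 0;
}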
1437 /* Return the 'arc-level' for the given frequency */
1474 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1481 if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER)) in a6xx_gmu_rpmh_arc_votes_init()
1488 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1505 "Level %u not found in the RPMh list\n", in a6xx_gmu_rpmh_arc_votes_init()
1511 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
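The fragments above come from a6xx_gmu_rpmh_arc_votes_init(), which reads the ARC level tables out of cmd-db and, for each GPU/GMU frequency, looks up an RPMh level that satisfies it, failing with "Level %u not found in the RPMh list" otherwise. A minimal stand-alone sketch of that lookup (not the kernel code; the level tables are made up and the vote packing is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/*
 * Find the first RPMh ARC level that satisfies the requested level and pack
 * an index-based vote.  The packing here is illustrative only.
 */
static int arc_vote_for_level(const uint16_t *rpmh_levels, int count,
			      unsigned int level, uint32_t *vote)
{
	int i;

	for (i = 0; i < count; i++) {
		if (rpmh_levels[i] >= level) {
			*vote = ((uint32_t)rpmh_levels[i] << 16) | i;
			return 0;
		}
	}

	fprintf(stderr, "Level %u not found in the RPMh list\n", level);
	return -1;
}

int main(void)
{
	/* hypothetical cmd-db ARC data and per-OPP required levels */
	const uint16_t rpmh_levels[] = { 16, 48, 64, 128, 192, 256 };
	const unsigned int opp_levels[] = { 64, 128, 224 };
	uint32_t vote;

	for (int i = 0; i < 3; i++)
		if (!arc_vote_for_level(rpmh_levels, 6, opp_levels[i], &vote))
			printf("level %u -> vote 0x%08x\n", opp_levels[i], vote);

	return 0;
}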
1537 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1538 * to construct the list of votes on the CPU and send it over. Query the RPMh
1547 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_votes_init()
1548 const struct a6xx_info *info = adreno_gpu->info->a6xx; in a6xx_gmu_rpmh_votes_init()
1549 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_rpmh_votes_init()
1553 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1554 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1557 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1558 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
1561 if (info->bcms && gmu->nr_gpu_bws > 1) in a6xx_gmu_rpmh_votes_init()
1582 count = size - 1; in a6xx_gmu_build_freq_table()
1614 count = size - 1; in a6xx_gmu_build_bw_table()
1634 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_pwrlevels_probe()
1635 const struct a6xx_info *info = adreno_gpu->info->a6xx; in a6xx_gmu_pwrlevels_probe()
1636 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_pwrlevels_probe()
1644 ret = devm_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1646 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1650 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1651 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1657 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1658 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1660 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
1666 if (info->bcms) in a6xx_gmu_pwrlevels_probe()
1667 gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1668 gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table)); in a6xx_gmu_pwrlevels_probe()
1670 /* Build the list of RPMh votes that we'll send to the GMU */ in a6xx_gmu_pwrlevels_probe()
1676 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1681 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1683 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1684 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1686 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1687 gmu->nr_clocks, "hub"); in a6xx_gmu_clocks_probe()
1700 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); in a6xx_gmu_get_mmio()
1701 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1704 ret = ioremap(res->start, resource_size(res)); in a6xx_gmu_get_mmio()
1706 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); in a6xx_gmu_get_mmio()
1707 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1722 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", in a6xx_gmu_get_irq()
1732 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_remove()
1733 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove()
1734 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1736 mutex_lock(&gmu->lock); in a6xx_gmu_remove()
1737 if (!gmu->initialized) { in a6xx_gmu_remove()
1738 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1742 gmu->initialized = false; in a6xx_gmu_remove()
1744 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1746 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1749 * Since cxpd is a virt device, the devlink with gmu-dev will be removed in a6xx_gmu_remove()
1752 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_remove()
1754 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1755 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1756 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1759 if (!IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_remove()
1760 qmp_put(gmu->qmp); in a6xx_gmu_remove()
1762 iounmap(gmu->mmio); in a6xx_gmu_remove()
1764 iounmap(gmu->rscc); in a6xx_gmu_remove()
1765 gmu->mmio = NULL; in a6xx_gmu_remove()
1766 gmu->rscc = NULL; in a6xx_gmu_remove()
1771 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1772 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1776 put_device(gmu->dev); in a6xx_gmu_remove()
1785 complete_all(&gmu->pd_gate); in cxpd_notifier_cb()
1793 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_wrapper_init()
1797 return -ENODEV; in a6xx_gmu_wrapper_init()
1799 gmu->dev = &pdev->dev; in a6xx_gmu_wrapper_init()
1801 ret = of_dma_configure(gmu->dev, node, true); in a6xx_gmu_wrapper_init()
1805 pm_runtime_enable(gmu->dev); in a6xx_gmu_wrapper_init()
1808 gmu->legacy = true; in a6xx_gmu_wrapper_init()
1811 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_wrapper_init()
1812 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_wrapper_init()
1813 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_wrapper_init()
1817 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_wrapper_init()
1818 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_wrapper_init()
1819 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_wrapper_init()
1823 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { in a6xx_gmu_wrapper_init()
1824 ret = -ENODEV; in a6xx_gmu_wrapper_init()
1828 init_completion(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1829 complete_all(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1830 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_wrapper_init()
1833 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_wrapper_init()
1834 if (IS_ERR(gmu->gxpd)) { in a6xx_gmu_wrapper_init()
1835 ret = PTR_ERR(gmu->gxpd); in a6xx_gmu_wrapper_init()
1839 gmu->initialized = true; in a6xx_gmu_wrapper_init()
1844 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_wrapper_init()
1847 iounmap(gmu->mmio); in a6xx_gmu_wrapper_init()
1850 put_device(gmu->dev); in a6xx_gmu_wrapper_init()
1857 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_init()
1858 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init()
1864 return -ENODEV; in a6xx_gmu_init()
1866 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1868 ret = of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1873 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1875 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1893 gmu->dummy.size = SZ_4K; in a6xx_gmu_init()
1896 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, in a6xx_gmu_init()
1901 gmu->dummy.size = SZ_8K; in a6xx_gmu_init()
1905 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, in a6xx_gmu_init()
1913 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1914 SZ_16M - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
1918 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition in a6xx_gmu_init()
1924 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1925 SZ_256K - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
1929 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1930 SZ_256K - SZ_16K, 0x44000, "dcache"); in a6xx_gmu_init()
1935 gmu->legacy = true; in a6xx_gmu_init()
1938 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); in a6xx_gmu_init()
1944 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); in a6xx_gmu_init()
1949 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); in a6xx_gmu_init()
1954 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1955 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1956 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1962 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1963 if (IS_ERR(gmu->rscc)) { in a6xx_gmu_init()
1964 ret = -ENODEV; in a6xx_gmu_init()
1968 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1972 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1973 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1975 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { in a6xx_gmu_init()
1976 ret = -ENODEV; in a6xx_gmu_init()
1980 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_init()
1981 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_init()
1982 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_init()
1986 link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME); in a6xx_gmu_init()
1988 ret = -ENODEV; in a6xx_gmu_init()
1992 gmu->qmp = qmp_get(gmu->dev); in a6xx_gmu_init()
1993 if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) { in a6xx_gmu_init()
1994 ret = PTR_ERR(gmu->qmp); in a6xx_gmu_init()
1998 init_completion(&gmu->pd_gate); in a6xx_gmu_init()
1999 complete_all(&gmu->pd_gate); in a6xx_gmu_init()
2000 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_init()
2006 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
2014 /* Initialize RPMh */ in a6xx_gmu_init()
2017 gmu->initialized = true; in a6xx_gmu_init()
2025 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_init()
2028 iounmap(gmu->mmio); in a6xx_gmu_init()
2030 iounmap(gmu->rscc); in a6xx_gmu_init()
2031 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
2032 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
2038 put_device(gmu->dev); in a6xx_gmu_init()