1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #define SWSMU_CODE_LAYER_L2
25
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "smu_v14_0.h"
35 #include "smu14_driver_if_v14_0.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "smu_v14_0_2_ppt.h"
39 #include "smu_v14_0_2_pptable.h"
40 #include "smu_v14_0_2_ppsmc.h"
41 #include "mp/mp_14_0_2_offset.h"
42 #include "mp/mp_14_0_2_sh_mask.h"
43
44 #include "smu_cmn.h"
45 #include "amdgpu_ras.h"
46
47 /*
48 * DO NOT use these for err/warn/info/debug messages.
49 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
50 * They are more MGPU friendly.
51 */
52 #undef pr_err
53 #undef pr_warn
54 #undef pr_info
55 #undef pr_debug
56
57 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
58
59 #define FEATURE_MASK(feature) (1ULL << feature)
60 #define SMC_DPM_FEATURE ( \
61 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
62 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
63 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
64 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
65 FEATURE_MASK(FEATURE_DPM_FCLK_BIT))
66
67 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
68 #define DEBUGSMC_MSG_Mode1Reset 2
69 #define LINK_SPEED_MAX 3
70
71 #define PP_OD_FEATURE_GFXCLK_FMIN 0
72 #define PP_OD_FEATURE_GFXCLK_FMAX 1
73 #define PP_OD_FEATURE_UCLK_FMIN 2
74 #define PP_OD_FEATURE_UCLK_FMAX 3
75 #define PP_OD_FEATURE_GFX_VF_CURVE 4
76 #define PP_OD_FEATURE_FAN_CURVE_TEMP 5
77 #define PP_OD_FEATURE_FAN_CURVE_PWM 6
78 #define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
79 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
80 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
81 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
82 #define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
83
/*
 * Generic SMU message -> SMU v14.0.2 PMFW message ID mapping.
 * Third MSG_MAP field presumably marks the message as valid under
 * SRIOV VF as in other swsmu ASIC files -- TODO confirm against smu_cmn.h.
 */
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,	1),
	/* "Enabled" on the driver side maps to the PMFW "Running" queries */
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,	1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,	0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,	0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,	0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		0),
	/* Generic DF-cstate control rides on the "external client" PMFW message */
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
	MSG_MAP(SetNumBadMemoryPagesRetired,	PPSMC_MSG_SetNumBadMemoryPagesRetired,	0),
	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
		PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,	0),
	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,		0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
};
140
/*
 * Generic SMU clock type -> SMU v14.0.2 PPCLK index mapping.
 * SCLK/GFXCLK and MCLK/UCLK are aliases for the same hardware clocks.
 */
static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
	CLK_MAP(DCEFCLK,	PPCLK_DCFCLK),
};
152
/*
 * Generic SMU feature bit -> SMU v14.0.2 PMFW feature bit mapping.
 * The explicit designated entries at the bottom alias several generic
 * bits onto a single combined PMFW feature (e.g. VCLK/DCLK DPM are both
 * covered by FEATURE_MM_DPM_BIT).
 */
static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	FEA_MAP(EDC_PWRBRK),
	FEA_MAP(SOC_EDC_XVMIN),
	FEA_MAP(GFX_PSM_DIDT),
	FEA_MAP(APT_ALL_ENABLE),
	FEA_MAP(APT_SQ_THROTTLE),
	FEA_MAP(APT_PF_DCS),
	FEA_MAP(GFX_EDC_XVMIN),
	FEA_MAP(GFX_DIDT_XVMIN),
	FEA_MAP(FAN_ABNORMAL),
	/* Multimedia DPM covers both VCLK and DCLK on this ASIC */
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	/* PPT limiting is part of the combined throttlers feature */
	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
216
/* Generic SMU table id -> SMU v14.0.2 PMFW table id mapping. */
static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	/* Combo pptable has no TAB_MAP shorthand; map it explicitly */
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
	TAB_MAP(OVERDRIVE),
};
230
/* Generic power source -> SMU v14.0.2 power source mapping (AC vs DC). */
static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
235
/* Power-profile mode -> PPLib workload bit mapping for SetWorkloadMask. */
static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,		WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
246
/*
 * ASIC throttler status bit -> generic SMU throttler bit mapping,
 * used when translating PMFW throttler status for userspace reporting.
 */
static const uint8_t smu_v14_0_2_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};
266
/*
 * Build the driver-allowed feature mask handed to the PMFW.
 *
 * @feature_mask: caller-provided array of @num 32-bit dwords; the code
 *                treats the first two dwords as one 64-bit mask, so only
 *                num <= 2 is supported.
 *
 * Returns 0 on success, -EINVAL if @num is larger than two dwords.
 *
 * NOTE(review): the mask defaults to all-ones via memset, so the
 * subsequent |= of GFXCLK/IMU bits is currently a no-op; the real
 * filtering logic is still under #if 0 (bring-up state) and only
 * clears bits once enabled.  The *(uint64_t *) punning of a uint32_t
 * buffer follows the established swsmu convention; callers are expected
 * to pass a suitably aligned two-dword array.
 */
static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
				     uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;
	/*u32 smu_version;*/

	if (num > 2)
		return -EINVAL;

	/* Start from "everything allowed" and (eventually) clear bits below */
	memset(feature_mask, 0xff, sizeof(uint32_t) * num);

	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
	}
#if 0
	if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
	    !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);

	if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* PMFW 78.58 contains a critical fix for gfxoff feature */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x004e3a00) ||
	    !(adev->pm.pp_feature & PP_GFXOFF_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);

	if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
	}

	if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
	}

	if (!(adev->pm.pp_feature & PP_ULV_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
#endif

	return 0;
}
317
smu_v14_0_2_check_powerplay_table(struct smu_context * smu)318 static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
319 {
320 struct smu_table_context *table_context = &smu->smu_table;
321 struct smu_14_0_2_powerplay_table *powerplay_table =
322 table_context->power_play_table;
323 struct smu_baco_context *smu_baco = &smu->smu_baco;
324 PPTable_t *pptable = smu->smu_table.driver_pptable;
325 const OverDriveLimits_t * const overdrive_upperlimits =
326 &pptable->SkuTable.OverDriveLimitsBasicMax;
327 const OverDriveLimits_t * const overdrive_lowerlimits =
328 &pptable->SkuTable.OverDriveLimitsBasicMin;
329
330 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
331 smu->dc_controlled_by_gpio = true;
332
333 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
334 smu_baco->platform_support = true;
335
336 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
337 smu_baco->maco_support = true;
338 }
339
340 if (!overdrive_lowerlimits->FeatureCtrlMask ||
341 !overdrive_upperlimits->FeatureCtrlMask)
342 smu->od_enabled = false;
343
344 table_context->thermal_controller_type =
345 powerplay_table->thermal_controller_type;
346
347 /*
348 * Instead of having its own buffer space and get overdrive_table copied,
349 * smu->od_settings just points to the actual overdrive_table
350 */
351 smu->od_settings = &powerplay_table->overdrive_table;
352
353 smu->adev->pm.no_fan =
354 !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
355
356 return 0;
357 }
358
smu_v14_0_2_store_powerplay_table(struct smu_context * smu)359 static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
360 {
361 struct smu_table_context *table_context = &smu->smu_table;
362 struct smu_14_0_2_powerplay_table *powerplay_table =
363 table_context->power_play_table;
364
365 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
366 sizeof(PPTable_t));
367
368 return 0;
369 }
370
/*
 * Fetch the combo pptable from the PMFW and report its location/size.
 *
 * @table: set to the cached combo pptable buffer on success
 * @size:  set to the powerplay table size on success
 *
 * Returns 0 on success or the error from smu_cmn_get_combo_pptable().
 */
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *tbl_ctx = &smu->smu_table;
	int ret;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = tbl_ctx->combo_pptable;
	*size = sizeof(struct smu_14_0_2_powerplay_table);

	return 0;
}
388
smu_v14_0_2_setup_pptable(struct smu_context * smu)389 static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
390 {
391 struct smu_table_context *smu_table = &smu->smu_table;
392 int ret = 0;
393
394 if (amdgpu_sriov_vf(smu->adev))
395 return 0;
396
397 ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
398 &smu_table->power_play_table,
399 &smu_table->power_play_table_size);
400 if (ret)
401 return ret;
402
403 ret = smu_v14_0_2_store_powerplay_table(smu);
404 if (ret)
405 return ret;
406
407 ret = smu_v14_0_2_check_powerplay_table(smu);
408 if (ret)
409 return ret;
410
411 return ret;
412 }
413
/*
 * Describe the SMU<->driver shared tables and allocate the host-side
 * working buffers (metrics, gpu_metrics, watermarks, ECC info).
 *
 * On any allocation failure, previously allocated buffers are released
 * via the goto-cleanup chain and -ENOMEM is returned.  The SMU_TABLE_INIT
 * descriptors themselves allocate nothing; VRAM backing is created later
 * by the common smu_v14_0 table setup.
 */
static int smu_v14_0_2_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU14_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	/* Force the first metrics fetch to refresh the cache */
	smu_table->metrics_time = 0;

	/* NOTE(review): gpu_metrics_v1_3 — confirm this is the intended
	 * userspace metrics version for this ASIC. */
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
	if (!smu_table->ecc_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}
468
smu_v14_0_2_allocate_dpm_context(struct smu_context * smu)469 static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
470 {
471 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
472
473 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
474 GFP_KERNEL);
475 if (!smu_dpm->dpm_context)
476 return -ENOMEM;
477
478 smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);
479
480 return 0;
481 }
482
/*
 * Initialize SMC tables: ASIC-specific table descriptors and buffers,
 * the DPM context, then the common smu_v14_0 table setup.  Stops at the
 * first failure and returns its error code.
 */
static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = smu_v14_0_2_tables_init(smu);
	if (!ret)
		ret = smu_v14_0_2_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v14_0_init_smc_tables(smu);

	return ret;
}
497
smu_v14_0_2_set_default_dpm_table(struct smu_context * smu)498 static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
499 {
500 struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
501 struct smu_table_context *table_context = &smu->smu_table;
502 PPTable_t *pptable = table_context->driver_pptable;
503 SkuTable_t *skutable = &pptable->SkuTable;
504 struct smu_14_0_dpm_table *dpm_table;
505 struct smu_14_0_pcie_table *pcie_table;
506 uint32_t link_level;
507 int ret = 0;
508
509 /* socclk dpm table setup */
510 dpm_table = &dpm_context->dpm_tables.soc_table;
511 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
512 ret = smu_v14_0_set_single_dpm_table(smu,
513 SMU_SOCCLK,
514 dpm_table);
515 if (ret)
516 return ret;
517 } else {
518 dpm_table->count = 1;
519 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
520 dpm_table->dpm_levels[0].enabled = true;
521 dpm_table->min = dpm_table->dpm_levels[0].value;
522 dpm_table->max = dpm_table->dpm_levels[0].value;
523 }
524
525 /* gfxclk dpm table setup */
526 dpm_table = &dpm_context->dpm_tables.gfx_table;
527 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
528 ret = smu_v14_0_set_single_dpm_table(smu,
529 SMU_GFXCLK,
530 dpm_table);
531 if (ret)
532 return ret;
533
534 /*
535 * Update the reported maximum shader clock to the value
536 * which can be guarded to be achieved on all cards. This
537 * is aligned with Window setting. And considering that value
538 * might be not the peak frequency the card can achieve, it
539 * is normal some real-time clock frequency can overtake this
540 * labelled maximum clock frequency(for example in pp_dpm_sclk
541 * sysfs output).
542 */
543 if (skutable->DriverReportedClocks.GameClockAc &&
544 (dpm_table->dpm_levels[dpm_table->count - 1].value >
545 skutable->DriverReportedClocks.GameClockAc)) {
546 dpm_table->dpm_levels[dpm_table->count - 1].value =
547 skutable->DriverReportedClocks.GameClockAc;
548 dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
549 }
550 } else {
551 dpm_table->count = 1;
552 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
553 dpm_table->dpm_levels[0].enabled = true;
554 dpm_table->min = dpm_table->dpm_levels[0].value;
555 dpm_table->max = dpm_table->dpm_levels[0].value;
556 }
557
558 /* uclk dpm table setup */
559 dpm_table = &dpm_context->dpm_tables.uclk_table;
560 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
561 ret = smu_v14_0_set_single_dpm_table(smu,
562 SMU_UCLK,
563 dpm_table);
564 if (ret)
565 return ret;
566 } else {
567 dpm_table->count = 1;
568 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
569 dpm_table->dpm_levels[0].enabled = true;
570 dpm_table->min = dpm_table->dpm_levels[0].value;
571 dpm_table->max = dpm_table->dpm_levels[0].value;
572 }
573
574 /* fclk dpm table setup */
575 dpm_table = &dpm_context->dpm_tables.fclk_table;
576 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
577 ret = smu_v14_0_set_single_dpm_table(smu,
578 SMU_FCLK,
579 dpm_table);
580 if (ret)
581 return ret;
582 } else {
583 dpm_table->count = 1;
584 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
585 dpm_table->dpm_levels[0].enabled = true;
586 dpm_table->min = dpm_table->dpm_levels[0].value;
587 dpm_table->max = dpm_table->dpm_levels[0].value;
588 }
589
590 /* vclk dpm table setup */
591 dpm_table = &dpm_context->dpm_tables.vclk_table;
592 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
593 ret = smu_v14_0_set_single_dpm_table(smu,
594 SMU_VCLK,
595 dpm_table);
596 if (ret)
597 return ret;
598 } else {
599 dpm_table->count = 1;
600 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
601 dpm_table->dpm_levels[0].enabled = true;
602 dpm_table->min = dpm_table->dpm_levels[0].value;
603 dpm_table->max = dpm_table->dpm_levels[0].value;
604 }
605
606 /* dclk dpm table setup */
607 dpm_table = &dpm_context->dpm_tables.dclk_table;
608 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
609 ret = smu_v14_0_set_single_dpm_table(smu,
610 SMU_DCLK,
611 dpm_table);
612 if (ret)
613 return ret;
614 } else {
615 dpm_table->count = 1;
616 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
617 dpm_table->dpm_levels[0].enabled = true;
618 dpm_table->min = dpm_table->dpm_levels[0].value;
619 dpm_table->max = dpm_table->dpm_levels[0].value;
620 }
621
622 /* lclk dpm table setup */
623 pcie_table = &dpm_context->dpm_tables.pcie_table;
624 pcie_table->num_of_link_levels = 0;
625 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
626 if (!skutable->PcieGenSpeed[link_level] &&
627 !skutable->PcieLaneCount[link_level] &&
628 !skutable->LclkFreq[link_level])
629 continue;
630
631 pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
632 skutable->PcieGenSpeed[link_level];
633 pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
634 skutable->PcieLaneCount[link_level];
635 pcie_table->clk_freq[pcie_table->num_of_link_levels] =
636 skutable->LclkFreq[link_level];
637 pcie_table->num_of_link_levels++;
638
639 if (link_level == 0)
640 link_level++;
641 }
642
643 /* dcefclk dpm table setup */
644 dpm_table = &dpm_context->dpm_tables.dcef_table;
645 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
646 ret = smu_v14_0_set_single_dpm_table(smu,
647 SMU_DCEFCLK,
648 dpm_table);
649 if (ret)
650 return ret;
651 } else {
652 dpm_table->count = 1;
653 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
654 dpm_table->dpm_levels[0].enabled = true;
655 dpm_table->min = dpm_table->dpm_levels[0].value;
656 dpm_table->max = dpm_table->dpm_levels[0].value;
657 }
658
659 return 0;
660 }
661
smu_v14_0_2_is_dpm_running(struct smu_context * smu)662 static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
663 {
664 int ret = 0;
665 uint64_t feature_enabled;
666
667 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
668 if (ret)
669 return false;
670
671 return !!(feature_enabled & SMC_DPM_FEATURE);
672 }
673
smu_v14_0_2_get_throttler_status(SmuMetrics_t * metrics)674 static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
675 {
676 uint32_t throttler_status = 0;
677 int i;
678
679 for (i = 0; i < THROTTLER_COUNT; i++)
680 throttler_status |=
681 (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
682
683 return throttler_status;
684 }
685
/* Activity (%) at or below which the post-deep-sleep clock is reported */
#define SMU_14_0_2_BUSY_THRESHOLD	5
/*
 * Read one metrics value from the (cached) PMFW metrics table.
 *
 * @member: which metric to extract
 * @value:  output; set to UINT_MAX for members not handled here
 *
 * Returns 0 on success or the error from smu_cmn_get_metrics_table().
 * Temperatures are scaled to millidegrees via
 * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; average socket power is
 * shifted left by 8 (fixed-point conversion per the PMFW interface --
 * TODO confirm units against smu14_driver_if).
 */
static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	/* Refresh the cached metrics table if it is stale */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		/* When mostly idle, report the deep-sleep-adjusted clock */
		if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v14_0_2_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		/* Unknown member: flag with an out-of-band value, still 0 */
		*value = UINT_MAX;
		break;
	}

	return ret;
}
808
/*
 * Report the min/max frequency of a clock domain from the cached DPM
 * tables.  Either @min or @max may be NULL if the caller only needs one
 * bound.  Returns -EINVAL for clock types without a DPM table here.
 */
static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_14_0_dpm_context *ctx = smu->smu_dpm.dpm_context;
	struct smu_14_0_dpm_table *table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		table = &ctx->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		table = &ctx->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		table = &ctx->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		table = &ctx->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		table = &ctx->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		table = &ctx->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = table->min;
	if (max)
		*max = table->max;

	return 0;
}
859
/*
 * Service an amd_pp sensor query.  Most sensors are a straight lookup
 * in the PMFW metrics table; MAX_FAN_RPM comes from the pptable, and
 * the two clock sensors are scaled by 100 to convert units.
 *
 * @data/@size: output buffer and byte count (*size is set to 4 for all
 *              handled sensors).  Returns -EOPNOTSUPP for sensors not
 *              handled here.
 */
static int smu_v14_0_2_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	MetricsMember_t member;
	int ret;

	/* MAX_FAN_RPM is static pptable data, no metrics fetch needed */
	if (sensor == AMDGPU_PP_SENSOR_MAX_FAN_RPM) {
		*(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
		*size = 4;
		return 0;
	}

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		member = METRICS_AVERAGE_MEMACTIVITY;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		member = METRICS_AVERAGE_GFXACTIVITY;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		member = METRICS_AVERAGE_SOCKETPOWER;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		member = METRICS_TEMPERATURE_HOTSPOT;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		member = METRICS_TEMPERATURE_EDGE;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		member = METRICS_TEMPERATURE_MEM;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		member = METRICS_CURR_UCLK;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		member = METRICS_AVERAGE_GFXCLK;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		member = METRICS_VOLTAGE_VDDGFX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	ret = smu_v14_0_2_get_smu_metrics_data(smu, member, (uint32_t *)data);

	/* Clock sensors are reported in units 100x the metrics value */
	if (sensor == AMDGPU_PP_SENSOR_GFX_MCLK ||
	    sensor == AMDGPU_PP_SENSOR_GFX_SCLK)
		*(uint32_t *)data *= 100;

	*size = 4;

	return ret;
}
937
static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id;

	/* Translate the generic clock type to the ASIC PPCLK index */
	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	/* Pick the metrics-table member that tracks this clock */
	if (clk_id == PPCLK_GFXCLK)
		member_type = METRICS_AVERAGE_GFXCLK;
	else if (clk_id == PPCLK_UCLK)
		member_type = METRICS_CURR_UCLK;
	else if (clk_id == PPCLK_FCLK)
		member_type = METRICS_CURR_FCLK;
	else if (clk_id == PPCLK_SOCCLK)
		member_type = METRICS_CURR_SOCCLK;
	else if (clk_id == PPCLK_VCLK_0)
		member_type = METRICS_AVERAGE_VCLK;
	else if (clk_id == PPCLK_DCLK_0)
		member_type = METRICS_AVERAGE_DCLK;
	else if (clk_id == PPCLK_DCFCLK)
		member_type = METRICS_CURR_DCEFCLK;
	else
		return -EINVAL;

	return smu_v14_0_2_get_smu_metrics_data(smu, member_type, value);
}
981
smu_v14_0_2_is_od_feature_supported(struct smu_context * smu,int od_feature_bit)982 static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
983 int od_feature_bit)
984 {
985 PPTable_t *pptable = smu->smu_table.driver_pptable;
986 const OverDriveLimits_t * const overdrive_upperlimits =
987 &pptable->SkuTable.OverDriveLimitsBasicMax;
988
989 return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
990 }
991
/*
 * Report the valid [min, max] range for one overdrive setting.
 * Either output pointer may be NULL if the caller only needs one bound.
 * Unknown feature bits yield INT_MAX for both bounds (an impossible range).
 */
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
					      int od_feature_bit,
					      int32_t *min,
					      int32_t *max)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const upper =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const lower =
		&pptable->SkuTable.OverDriveLimitsBasicMin;
	int32_t lo, hi;

	switch (od_feature_bit) {
	case PP_OD_FEATURE_GFXCLK_FMIN:
	case PP_OD_FEATURE_GFXCLK_FMAX:
		/* gfxclk OD works on a single frequency offset */
		lo = lower->GfxclkFoffset;
		hi = upper->GfxclkFoffset;
		break;
	case PP_OD_FEATURE_UCLK_FMIN:
		lo = lower->UclkFmin;
		hi = upper->UclkFmin;
		break;
	case PP_OD_FEATURE_UCLK_FMAX:
		lo = lower->UclkFmax;
		hi = upper->UclkFmax;
		break;
	case PP_OD_FEATURE_GFX_VF_CURVE:
		/* the same bound applies to every V/F zone boundary */
		lo = lower->VoltageOffsetPerZoneBoundary[0];
		hi = upper->VoltageOffsetPerZoneBoundary[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_TEMP:
		lo = lower->FanLinearTempPoints[0];
		hi = upper->FanLinearTempPoints[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_PWM:
		lo = lower->FanLinearPwmPoints[0];
		hi = upper->FanLinearPwmPoints[0];
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
		lo = lower->AcousticLimitRpmThreshold;
		hi = upper->AcousticLimitRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
		lo = lower->AcousticTargetRpmThreshold;
		hi = upper->AcousticTargetRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
		lo = lower->FanTargetTemperature;
		hi = upper->FanTargetTemperature;
		break;
	case PP_OD_FEATURE_FAN_MINIMUM_PWM:
		lo = lower->FanMinimumPwm;
		hi = upper->FanMinimumPwm;
		break;
	case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
		lo = lower->FanZeroRpmEnable;
		hi = upper->FanZeroRpmEnable;
		break;
	default:
		lo = hi = INT_MAX;
		break;
	}

	if (min)
		*min = lo;
	if (max)
		*max = hi;
}
1060
/*
 * Emit the sysfs representation for one clock-type file (pp_dpm_*,
 * pp_od_clk_voltage sections, pcie link levels, OD ranges).
 * Returns the number of bytes written into @buf, or a negative errno.
 */
static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	struct smu_14_0_dpm_table *single_dpm_table;
	struct smu_14_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int32_t min_value, max_value;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* After a fatal RAS interrupt the hardware state is not trustworthy */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/*
	 * First switch: bind single_dpm_table for the plain clock types.
	 * The default case leaves it unset, which is safe because the
	 * second switch only dereferences it for the cases set here.
	 */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	case SMU_DCEFCLK:
		single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
		break;
	default:
		break;
	}

	/* Second switch: actual formatting per clock type */
	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
	case SMU_DCEFCLK:
		ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			     (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						single_dpm_table->dpm_levels[0].value,
						single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						single_dpm_table->dpm_levels[1].value,
						single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			/* Discrete DPM: list every level, star the active one */
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						i, single_dpm_table->dpm_levels[i].value,
						single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		/* Current link state comes from the metrics table */
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		/* One line per link level; star the level matching gen+width */
		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
					(pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" :
					(pcie_table->pcie_lane[i] == 7) ? "x32" : "",
					pcie_table->clk_freq[i],
					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
					"*" : "");
		break;

	case SMU_OD_SCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFXCLK_BIT))
			break;

		/* gfxclk OD is exposed as a single frequency offset */
		size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dMhz\n",
					od_table->OverDriveTable.GfxclkFoffset);
		break;

	case SMU_OD_MCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_UCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
					od_table->OverDriveTable.UclkFmin,
					od_table->OverDriveTable.UclkFmax);
		break;

	case SMU_OD_VDDGFX_OFFSET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		/* Only the first zone boundary offset is user-visible */
		size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dmV\n",
				      od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
		break;

	case SMU_OD_FAN_CURVE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		/* Emit temp/pwm pairs, then the permitted range for each axis */
		size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
			size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
						i,
						(int)od_table->OverDriveTable.FanLinearTempPoints[i],
						(int)od_table->OverDriveTable.FanLinearPwmPoints[i]);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_TEMP,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
				      min_value, max_value);

		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
				      min_value, max_value);

		break;

	case SMU_OD_ACOUSTIC_LIMIT:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticLimitRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_ACOUSTIC_TARGET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticTargetRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_TARGET_TEMPERATURE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanTargetTemperature);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_MINIMUM_PWM:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanMinimumPwm);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_MINIMUM_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_ZERO_RPM_ENABLE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_ZERO_FAN_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanZeroRpmEnable);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_RANGE:
		/* Summary block; skipped entirely if no OD feature is present */
		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMAX,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFX_VF_CURVE,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
					      min_value, max_value);
		}
		break;

	default:
		break;
	}

	return size;
}
1392
/*
 * Restrict a clock to the dpm-level span selected by @mask.
 * The lowest and highest set bits of @mask pick the soft min/max levels.
 */
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_14_0_dpm_table *table;
	uint32_t level_lo, level_hi;
	uint32_t min_freq, max_freq;

	level_lo = mask ? (ffs(mask) - 1) : 0;
	level_hi = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_SOCCLK:
		table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		/* SMU_DCEFCLK, SMU_PCIE etc. cannot be forced here */
		return 0;
	}

	if (table->is_fine_grained) {
		/* There are only 2 levels for fine grained DPM */
		level_lo = level_lo >= 1 ? 1 : 0;
		level_hi = level_hi >= 1 ? 1 : 0;
	} else if ((level_hi >= table->count) ||
		   (level_lo >= table->count)) {
		return -EINVAL;
	}

	min_freq = table->dpm_levels[level_lo].value;
	max_freq = table->dpm_levels[level_hi].value;

	return smu_v14_0_set_soft_freq_limited_range(smu,
						     clk_type,
						     min_freq,
						     max_freq,
						     false);
}
1472
/*
 * Clamp the pcie link-level table to the platform's gen/width caps and
 * program each level into the PMFW.
 */
static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
					      uint8_t pcie_gen_cap,
					      uint8_t pcie_width_cap)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_14_0_pcie_table *pcie_table =
		&dpm_context->dpm_tables.pcie_table;
	int levels = pcie_table->num_of_link_levels;
	uint32_t arg;
	int i, ret;

	if (!levels)
		return 0;

	if (smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
		/* PCIe DPM enabled: cap each level individually */
		for (i = 0; i < levels; i++) {
			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
				pcie_table->pcie_gen[i] = pcie_gen_cap;
			if (pcie_table->pcie_lane[i] > pcie_width_cap)
				pcie_table->pcie_lane[i] = pcie_width_cap;
		}
	} else {
		/*
		 * PCIe DPM disabled: reduce the caps to the top level's
		 * settings, then pin every level to the same values.
		 */
		if (pcie_gen_cap > pcie_table->pcie_gen[levels - 1])
			pcie_gen_cap = pcie_table->pcie_gen[levels - 1];
		if (pcie_width_cap > pcie_table->pcie_lane[levels - 1])
			pcie_width_cap = pcie_table->pcie_lane[levels - 1];
		for (i = 0; i < levels; i++) {
			pcie_table->pcie_gen[i] = pcie_gen_cap;
			pcie_table->pcie_lane[i] = pcie_width_cap;
		}
	}

	/* Message argument layout: [23:16] level, [15:8] gen, [7:0] lanes */
	for (i = 0; i < levels; i++) {
		arg = (i << 16) |
		      (pcie_table->pcie_gen[i] << 8) |
		      pcie_table->pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      arg,
						      NULL);
		if (ret)
			return ret;
	}

	return 0;
}
1523
/*
 * Baseline thermal ranges copied into the caller-provided struct before the
 * pptable-specific limits are applied:
 *   [0] default policy (absolute-zero floor, 99C ceilings)
 *   [1] uniform 120C policy
 */
static const struct smu_temperature_range smu14_thermal_policy[] = {
	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
1528
smu_v14_0_2_get_thermal_temperature_range(struct smu_context * smu,struct smu_temperature_range * range)1529 static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu,
1530 struct smu_temperature_range *range)
1531 {
1532 struct smu_table_context *table_context = &smu->smu_table;
1533 struct smu_14_0_2_powerplay_table *powerplay_table =
1534 table_context->power_play_table;
1535 PPTable_t *pptable = smu->smu_table.driver_pptable;
1536
1537 if (amdgpu_sriov_vf(smu->adev))
1538 return 0;
1539
1540 if (!range)
1541 return -EINVAL;
1542
1543 memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range));
1544
1545 range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] *
1546 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1547 range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
1548 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1549 range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] *
1550 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1551 range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
1552 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1553 range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] *
1554 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1555 range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
1556 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1557 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1558 range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset;
1559
1560 return 0;
1561 }
1562
smu_v14_0_2_populate_umd_state_clk(struct smu_context * smu)1563 static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu)
1564 {
1565 struct smu_14_0_dpm_context *dpm_context =
1566 smu->smu_dpm.dpm_context;
1567 struct smu_14_0_dpm_table *gfx_table =
1568 &dpm_context->dpm_tables.gfx_table;
1569 struct smu_14_0_dpm_table *mem_table =
1570 &dpm_context->dpm_tables.uclk_table;
1571 struct smu_14_0_dpm_table *soc_table =
1572 &dpm_context->dpm_tables.soc_table;
1573 struct smu_14_0_dpm_table *vclk_table =
1574 &dpm_context->dpm_tables.vclk_table;
1575 struct smu_14_0_dpm_table *dclk_table =
1576 &dpm_context->dpm_tables.dclk_table;
1577 struct smu_14_0_dpm_table *fclk_table =
1578 &dpm_context->dpm_tables.fclk_table;
1579 struct smu_umd_pstate_table *pstate_table =
1580 &smu->pstate_table;
1581 struct smu_table_context *table_context = &smu->smu_table;
1582 PPTable_t *pptable = table_context->driver_pptable;
1583 DriverReportedClocks_t driver_clocks =
1584 pptable->SkuTable.DriverReportedClocks;
1585
1586 pstate_table->gfxclk_pstate.min = gfx_table->min;
1587 if (driver_clocks.GameClockAc &&
1588 (driver_clocks.GameClockAc < gfx_table->max))
1589 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
1590 else
1591 pstate_table->gfxclk_pstate.peak = gfx_table->max;
1592
1593 pstate_table->uclk_pstate.min = mem_table->min;
1594 pstate_table->uclk_pstate.peak = mem_table->max;
1595
1596 pstate_table->socclk_pstate.min = soc_table->min;
1597 pstate_table->socclk_pstate.peak = soc_table->max;
1598
1599 pstate_table->vclk_pstate.min = vclk_table->min;
1600 pstate_table->vclk_pstate.peak = vclk_table->max;
1601
1602 pstate_table->dclk_pstate.min = dclk_table->min;
1603 pstate_table->dclk_pstate.peak = dclk_table->max;
1604
1605 pstate_table->fclk_pstate.min = fclk_table->min;
1606 pstate_table->fclk_pstate.peak = fclk_table->max;
1607
1608 if (driver_clocks.BaseClockAc &&
1609 driver_clocks.BaseClockAc < gfx_table->max)
1610 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
1611 else
1612 pstate_table->gfxclk_pstate.standard = gfx_table->max;
1613 pstate_table->uclk_pstate.standard = mem_table->max;
1614 pstate_table->socclk_pstate.standard = soc_table->min;
1615 pstate_table->vclk_pstate.standard = vclk_table->min;
1616 pstate_table->dclk_pstate.standard = dclk_table->min;
1617 pstate_table->fclk_pstate.standard = fclk_table->min;
1618
1619 return 0;
1620 }
1621
smu_v14_0_2_get_unique_id(struct smu_context * smu)1622 static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
1623 {
1624 struct smu_table_context *smu_table = &smu->smu_table;
1625 SmuMetrics_t *metrics =
1626 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
1627 struct amdgpu_device *adev = smu->adev;
1628 uint32_t upper32 = 0, lower32 = 0;
1629 int ret;
1630
1631 ret = smu_cmn_get_metrics_table(smu, NULL, false);
1632 if (ret)
1633 goto out;
1634
1635 upper32 = metrics->PublicSerialNumberUpper;
1636 lower32 = metrics->PublicSerialNumberLower;
1637
1638 out:
1639 adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1640 }
1641
/* Report fan speed as a 0-255 pwm value derived from the PMFW percentage */
static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
					 uint32_t *speed)
{
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v14_0_2_get_smu_metrics_data(smu,
					       METRICS_CURR_FANPWM,
					       speed);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
		return ret;
	}

	/* Convert the PMFW output which is in percent to pwm(255) based */
	*speed = *speed * 255 / 100;
	if (*speed > 255)
		*speed = 255;

	return 0;
}
1663
/* Report the current fan speed in RPM straight from the metrics table */
static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
					 uint32_t *speed)
{
	int ret = -EINVAL;

	if (speed)
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_CURR_FANSPEED,
						       speed);

	return ret;
}
1674
/*
 * Report the socket power limits. The current/default limit is queried from
 * the PMFW, falling back to the pptable AC/DC defaults; the max is the
 * PPT0 message limit and the min is 0. Output pointers may be NULL.
 */
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit,
				       uint32_t *min_power_limit)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
	uint32_t msg_limit =
		pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
	uint32_t limit;

	if (smu_v14_0_get_current_power_limit(smu, &limit)) {
		/* PMFW query failed: fall back to pptable defaults */
		if (smu->adev->pm.ac_power)
			limit = skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
		else
			limit = skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
	}

	if (current_power_limit)
		*current_power_limit = limit;
	if (default_power_limit)
		*default_power_limit = limit;
	if (max_power_limit)
		*max_power_limit = msg_limit;
	if (min_power_limit)
		*min_power_limit = 0;

	return 0;
}
1705
/*
 * Emit the power-profile table for sysfs: a header row, then per profile
 * the GFXCLK and FCLK activity-monitor coefficients fetched from the PMFW.
 * Returns the number of bytes written, or a negative errno.
 */
static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
					      char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int16_t workload_type = 0;
	uint32_t i, size = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9]);

	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		/* skip profiles this ASIC does not implement */
		if (workload_type == -ENOTSUPP)
			continue;
		else if (workload_type < 0)
			return -EINVAL;

		/* fetch (not write) the coefficient table for this workload */
		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					      workload_type,
					      (void *)(&activity_monitor_external),
					      false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		/* profile header row; '*' marks the active profile */
		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor->Gfx_FPS,
			activity_monitor->Gfx_MinActiveFreqType,
			activity_monitor->Gfx_MinActiveFreq,
			activity_monitor->Gfx_BoosterFreqType,
			activity_monitor->Gfx_BoosterFreq,
			activity_monitor->Gfx_PD_Data_limit_c,
			activity_monitor->Gfx_PD_Data_error_coeff,
			activity_monitor->Gfx_PD_Data_error_rate_coeff);

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"FCLK",
			activity_monitor->Fclk_FPS,
			activity_monitor->Fclk_MinActiveFreqType,
			activity_monitor->Fclk_MinActiveFreq,
			activity_monitor->Fclk_BoosterFreqType,
			activity_monitor->Fclk_BoosterFreq,
			activity_monitor->Fclk_PD_Data_limit_c,
			activity_monitor->Fclk_PD_Data_error_coeff,
			activity_monitor->Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}
1786
/*
 * Custom power-profile input layout: 2 clock domains (gfxclk, fclk), each
 * taking 9 longs — an in-use flag followed by 8 activity-monitor
 * coefficients. SIZE is the byte size of the whole 2x9 long array.
 */
#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
1790
/*
 * smu_v14_0_2_set_power_profile_mode_coeff - program CUSTOM workload coefficients
 * @smu: SMU context
 * @input: SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT groups of
 *         SMU_14_0_2_CUSTOM_PARAMS_COUNT longs; entry 0 of a group flags
 *         whether that group carries data, entries 1..8 are the values
 *
 * Reads the current CUSTOM activity-monitor table back from PMFW, overlays
 * only the clock groups flagged in @input (so untouched fields keep their
 * firmware values), then writes the table back.
 *
 * Returns 0 on success or the error from the SMU table transfer.
 */
static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
						    long *input)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	int ret, idx;

	/* Fetch the live table so unmodified coefficients are preserved */
	ret = smu_cmn_update_table(smu,
				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
				   WORKLOAD_PPLIB_CUSTOM_BIT,
				   (void *)(&activity_monitor_external),
				   false);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
		return ret;
	}

	idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
	if (input[idx]) {
		/* Gfxclk */
		activity_monitor->Gfx_FPS = input[idx + 1];
		activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
		activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
		activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
		activity_monitor->Gfx_BoosterFreq = input[idx + 5];
		activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
		activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
		activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
	}
	idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
	if (input[idx]) {
		/* Fclk */
		activity_monitor->Fclk_FPS = input[idx + 1];
		activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
		activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
		activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
		activity_monitor->Fclk_BoosterFreq = input[idx + 5];
		activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
		activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
		activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
	}

	/* Push the merged table back to PMFW */
	ret = smu_cmn_update_table(smu,
				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
				   WORKLOAD_PPLIB_CUSTOM_BIT,
				   (void *)(&activity_monitor_external),
				   true);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
		return ret;
	}

	return ret;
}
1846
/*
 * smu_v14_0_2_set_power_profile_mode - apply a workload profile mask
 * @smu: SMU context
 * @workload_mask: bitmask of PP_SMC_POWER_PROFILE_* profiles to enable
 * @custom_params: CUSTOM profile parameters (clock index followed by the
 *                 8 coefficient values), or NULL when not updating them
 * @custom_params_max_idx: number of entries in @custom_params
 *
 * Maps the driver-side profile mask onto the PMFW workload mask, keeps a
 * cached copy of the CUSTOM coefficients in smu->custom_profile_params so
 * they survive repeated mode switches, and finally sends SetWorkloadMask.
 *
 * Returns 0 on success or a negative error code.
 */
static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
					      u32 workload_mask,
					      long *custom_params,
					      u32 custom_params_max_idx)
{
	u32 backend_workload_mask = 0;
	int ret, idx = -1, i;

	smu_cmn_get_backend_workload_mask(smu, workload_mask,
					  &backend_workload_mask);

	/* disable deep sleep if compute is enabled */
	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
		smu_v14_0_deep_sleep_control(smu, false);
	else
		smu_v14_0_deep_sleep_control(smu, true);

	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
		/* Lazily allocate the cached CUSTOM coefficient store */
		if (!smu->custom_profile_params) {
			smu->custom_profile_params =
				kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
			if (!smu->custom_profile_params)
				return -ENOMEM;
		}
		if (custom_params && custom_params_max_idx) {
			if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
				return -EINVAL;
			/* custom_params[0] selects the clock group (0 Gfx, 1 Fclk) */
			if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
				return -EINVAL;
			idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
			smu->custom_profile_params[idx] = 1;
			for (i = 1; i < custom_params_max_idx; i++)
				smu->custom_profile_params[idx + i] = custom_params[i];
		}
		ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
							       smu->custom_profile_params);
		if (ret) {
			/* Roll back the enable flag we just cached */
			if (idx != -1)
				smu->custom_profile_params[idx] = 0;
			return ret;
		}
	} else if (smu->custom_profile_params) {
		/* CUSTOM no longer requested: drop the cached coefficients */
		memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
					      backend_workload_mask, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
			workload_mask);
		/* Roll back the cached enable flag on failure */
		if (idx != -1)
			smu->custom_profile_params[idx] = 0;
		return ret;
	}

	return ret;
}
1904
smu_v14_0_2_baco_enter(struct smu_context * smu)1905 static int smu_v14_0_2_baco_enter(struct smu_context *smu)
1906 {
1907 struct smu_baco_context *smu_baco = &smu->smu_baco;
1908 struct amdgpu_device *adev = smu->adev;
1909
1910 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
1911 return smu_v14_0_baco_set_armd3_sequence(smu,
1912 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
1913 else
1914 return smu_v14_0_baco_enter(smu);
1915 }
1916
smu_v14_0_2_baco_exit(struct smu_context * smu)1917 static int smu_v14_0_2_baco_exit(struct smu_context *smu)
1918 {
1919 struct amdgpu_device *adev = smu->adev;
1920
1921 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
1922 /* Wait for PMFW handling for the Dstate change */
1923 usleep_range(10000, 11000);
1924 return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
1925 } else {
1926 return smu_v14_0_baco_exit(smu);
1927 }
1928 }
1929
smu_v14_0_2_is_mode1_reset_supported(struct smu_context * smu)1930 static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu)
1931 {
1932 // TODO
1933
1934 return true;
1935 }
1936
/*
 * smu_v14_0_2_i2c_xfer - i2c_algorithm master_xfer backed by SMU SW-I2C
 * @i2c_adap: adapter (per-port, registered in smu_v14_0_2_i2c_control_init)
 * @msg: array of I2C messages to execute as one transaction
 * @num_msgs: number of messages in @msg
 *
 * Packs all messages into a single SwI2cRequest_t (one SwI2cCmd_t per data
 * byte), hands it to PMFW via the I2C_COMMANDS table, then copies read data
 * back out of the response that PMFW leaves in the driver table.
 *
 * Returns @num_msgs on success, a negative error code otherwise.
 *
 * NOTE(review): assumes the total byte count fits in the request's command
 * array; the adapter quirks (max_read_len/max_write_len) are relied on to
 * bound it to MAX_SW_I2C_COMMANDS — confirm quirks cover all entry paths.
 */
static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap,
				struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	/* Flatten every message byte into one command stream; c counts bytes */
	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes.
				 */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	/* pm.mutex serializes driver-table use against other SMU traffic */
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&adev->pm.mutex);
	if (r)
		goto fail;

	/* Copy read bytes back; c must walk the same flattened byte offsets */
	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	kfree(req);
	return r;
}
2014
smu_v14_0_2_i2c_func(struct i2c_adapter * adap)2015 static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
2016 {
2017 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
2018 }
2019
/* I2C transfer ops implemented on top of the SMU SW-I2C request table */
static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
	.master_xfer = smu_v14_0_2_i2c_xfer,
	.functionality = smu_v14_0_2_i2c_func,
};
2024
/*
 * Constraints imposed by the SW-I2C request format: the command array
 * bounds per-transfer length, and combined transactions are limited to a
 * short (register-address) write followed by the remaining commands.
 */
static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
2032
/*
 * smu_v14_0_2_i2c_control_init - register the SMU-backed I2C adapters
 * @smu: SMU context
 *
 * Sets up and registers one i2c_adapter per SMU I2C bus, then records which
 * buses carry the FRU and RAS EEPROMs.  On failure all previously registered
 * adapters are removed again.
 *
 * Returns 0 on success or the i2c_add_adapter() error.
 */
static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int res, i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		smu_i2c->adev = adev;
		smu_i2c->port = i;
		mutex_init(&smu_i2c->mutex);
		control->owner = THIS_MODULE;
		control->dev.parent = &adev->pdev->dev;
		control->algo = &smu_v14_0_2_i2c_algo;
		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
		control->quirks = &smu_v14_0_2_i2c_control_quirks;
		i2c_set_adapdata(control, smu_i2c);

		res = i2c_add_adapter(control);
		if (res) {
			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
			goto Out_err;
		}
	}

	/* assign the buses used for the FRU EEPROM and RAS EEPROM */
	/* XXX ideally this would be something in a vbios data table */
	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	/*
	 * NOTE(review): this also calls i2c_del_adapter() on index i, whose
	 * registration just failed; i2c_del_adapter() ignores unregistered
	 * adapters, so this is harmless but worth confirming intentional.
	 */
	for ( ; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}
2074
smu_v14_0_2_i2c_control_fini(struct smu_context * smu)2075 static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
2076 {
2077 struct amdgpu_device *adev = smu->adev;
2078 int i;
2079
2080 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2081 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2082 struct i2c_adapter *control = &smu_i2c->adapter;
2083
2084 i2c_del_adapter(control);
2085 }
2086 adev->pm.ras_eeprom_i2c_bus = NULL;
2087 adev->pm.fru_eeprom_i2c_bus = NULL;
2088 }
2089
smu_v14_0_2_set_mp1_state(struct smu_context * smu,enum pp_mp1_state mp1_state)2090 static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
2091 enum pp_mp1_state mp1_state)
2092 {
2093 int ret;
2094
2095 switch (mp1_state) {
2096 case PP_MP1_STATE_UNLOAD:
2097 ret = smu_cmn_set_mp1_state(smu, mp1_state);
2098 break;
2099 default:
2100 /* Ignore others */
2101 ret = 0;
2102 }
2103
2104 return ret;
2105 }
2106
smu_v14_0_2_set_df_cstate(struct smu_context * smu,enum pp_df_cstate state)2107 static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
2108 enum pp_df_cstate state)
2109 {
2110 return smu_cmn_send_smc_msg_with_param(smu,
2111 SMU_MSG_DFCstateControl,
2112 state,
2113 NULL);
2114 }
2115
smu_v14_0_2_mode1_reset(struct smu_context * smu)2116 static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
2117 {
2118 int ret = 0;
2119
2120 ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
2121 if (!ret) {
2122 if (amdgpu_emu_mode == 1)
2123 msleep(50000);
2124 else
2125 msleep(1000);
2126 }
2127
2128 return ret;
2129 }
2130
/* Mode2 reset is not implemented yet; report success unconditionally. TODO */
static int smu_v14_0_2_mode2_reset(struct smu_context *smu)
{
	return 0;
}
2139
smu_v14_0_2_enable_gfx_features(struct smu_context * smu)2140 static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
2141 {
2142 struct amdgpu_device *adev = smu->adev;
2143
2144 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
2145 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
2146 FEATURE_PWR_GFX, NULL);
2147 else
2148 return -EOPNOTSUPP;
2149 }
2150
/* Cache the MP1 C2PMSG mailbox register offsets used for SMU messaging */
static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	/* Normal message channel: argument, message id, response */
	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);

	/* Debug message channel (used e.g. for mode1 reset) */
	smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_53);
	smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_75);
	smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
}
2163
/*
 * smu_v14_0_2_get_gpu_metrics - fill a gpu_metrics_v1_3 snapshot
 * @smu: SMU context
 * @table: out parameter; set to the cached gpu_metrics buffer
 *
 * Pulls a fresh SmuMetrics_t from PMFW and translates it into the generic
 * gpu_metrics_v1_3 layout consumed by the sysfs/pm interfaces.
 *
 * Returns sizeof(struct gpu_metrics_v1_3) on success, negative error on
 * failure to fetch the metrics table.
 */
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	/* bypass_cache=true: always read a fresh sample from PMFW */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	/* report the hotter of the two memory VR sensors */
	gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
					     metrics->AvgTemperature[TEMP_VR_MEM1]);

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
					       metrics->Vcn1ActivityPercentage);

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	/* below the busy threshold, report the post-deep-sleep frequency */
	if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	/*
	 * NOTE(review): vclk1/dclk1 reuse the *_0 clock indices here while
	 * the averages above use distinct Vclk1/Dclk1 fields — confirm the
	 * driver_if PPCLK enum has no VCLK_1/DCLK_1 (shared clock domain)
	 * rather than this being a copy-paste slip.
	 */
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];

	gpu_metrics->throttle_status =
			smu_v14_0_2_get_throttler_status(metrics);
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v14_0_2_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	/* clamp out-of-range firmware gen values to gen1 */
	if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
	else
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
2245
/* Emit the interesting overdrive settings to the debug log */
static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
				      OverDriveTableExternal_t *od_table)
{
	struct amdgpu_device *adev = smu->adev;
	const OverDriveTable_t *od = &od_table->OverDriveTable;

	dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od->GfxclkFoffset);
	dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od->UclkFmin, od->UclkFmax);
}
2255
/* Push @od_table to PMFW; logs and returns the transfer error, if any */
static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu,
					      OverDriveTableExternal_t *od_table)
{
	int ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
				       (void *)od_table, true);

	if (ret)
		dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");

	return ret;
}
2270
smu_v14_0_2_set_supported_od_feature_mask(struct smu_context * smu)2271 static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
2272 {
2273 struct amdgpu_device *adev = smu->adev;
2274
2275 if (smu_v14_0_2_is_od_feature_supported(smu,
2276 PP_OD_FEATURE_FAN_CURVE_BIT))
2277 adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
2278 OD_OPS_SUPPORT_FAN_CURVE_SET |
2279 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
2280 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
2281 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
2282 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
2283 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
2284 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
2285 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
2286 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
2287 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
2288 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET;
2289 }
2290
/* Fetch the current overdrive table from PMFW into @od_table */
static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
					   OverDriveTableExternal_t *od_table)
{
	int ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
				       (void *)od_table, false);

	if (ret)
		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");

	return ret;
}
2305
/*
 * smu_v14_0_2_set_default_od_settings - (re)initialize the overdrive tables
 * @smu: SMU context
 *
 * Reads the boot-time overdrive table from PMFW and seeds the working
 * (overdrive_table) and user (user_overdrive_table) copies from it.  On a
 * fresh init the user table simply mirrors boot defaults; on S3/S4/runpm
 * resume with user settings active, the user table is rebuilt from boot
 * defaults and the user-tunable fields are copied back in, so stale
 * firmware-side fields do not leak into the cached user settings.
 *
 * Returns 0 on success or the table-fetch error.
 */
static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
{
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	OverDriveTableExternal_t *boot_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
	OverDriveTableExternal_t *user_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
	OverDriveTableExternal_t user_od_table_bak;
	int ret;
	int i;

	ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table);
	if (ret)
		return ret;

	smu_v14_0_2_dump_od_table(smu, boot_od_table);

	/* working copy always starts from boot defaults */
	memcpy(od_table,
	       boot_od_table,
	       sizeof(OverDriveTableExternal_t));

	/*
	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
	 * but we have to preserve user defined values in "user_od_table".
	 */
	if (!smu->adev->in_suspend) {
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		smu->user_dpm_profile.user_od = false;
	} else if (smu->user_dpm_profile.user_od) {
		/* rebuild user table from boot, then restore user-set fields */
		memcpy(&user_od_table_bak,
		       user_od_table,
		       sizeof(OverDriveTableExternal_t));
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		user_od_table->OverDriveTable.GfxclkFoffset =
				user_od_table_bak.OverDriveTable.GfxclkFoffset;
		user_od_table->OverDriveTable.UclkFmin =
				user_od_table_bak.OverDriveTable.UclkFmin;
		user_od_table->OverDriveTable.UclkFmax =
				user_od_table_bak.OverDriveTable.UclkFmax;
		for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
			user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
				user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
		/*
		 * NOTE(review): only NUM_OD_FAN_MAX_POINTS - 1 fan points are
		 * restored here — confirm the last point is firmware-owned.
		 */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
			user_od_table->OverDriveTable.FanLinearTempPoints[i] =
				user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
			user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
				user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
		}
		user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
			user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
		user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
			user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
		user_od_table->OverDriveTable.FanTargetTemperature =
			user_od_table_bak.OverDriveTable.FanTargetTemperature;
		user_od_table->OverDriveTable.FanMinimumPwm =
			user_od_table_bak.OverDriveTable.FanMinimumPwm;
		user_od_table->OverDriveTable.FanZeroRpmEnable =
			user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
	}

	smu_v14_0_2_set_supported_od_feature_mask(smu);

	return 0;
}
2375
smu_v14_0_2_restore_user_od_settings(struct smu_context * smu)2376 static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
2377 {
2378 struct smu_table_context *table_context = &smu->smu_table;
2379 OverDriveTableExternal_t *od_table = table_context->overdrive_table;
2380 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
2381 int res;
2382
2383 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
2384 BIT(PP_OD_FEATURE_UCLK_BIT) |
2385 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
2386 BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2387 res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
2388 user_od_table->OverDriveTable.FeatureCtrlMask = 0;
2389 if (res == 0)
2390 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
2391
2392 return res;
2393 }
2394
/*
 * smu_v14_0_2_od_restore_table_single - reset one OD setting to boot default
 * @smu: SMU context
 * @input: PP_OD_EDIT_* selector identifying the setting to restore
 *
 * Copies the selected field(s) from the boot overdrive table into the
 * working table and marks the matching feature bit in FeatureCtrlMask so a
 * subsequent commit uploads it.  Fan-related settings (curve, acoustic
 * limits, target temperature, minimum PWM) all share
 * PP_OD_FEATURE_FAN_CURVE_BIT and also force the fan back to auto mode;
 * only zero-RPM uses its own PP_OD_FEATURE_ZERO_FAN_BIT.
 *
 * Returns 0 on success, -EINVAL for an unknown selector.
 */
static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input)
{
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *boot_overdrive_table =
		(OverDriveTableExternal_t *)table_context->boot_overdrive_table;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	struct amdgpu_device *adev = smu->adev;
	int i;

	switch (input) {
	case PP_OD_EDIT_FAN_CURVE:
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
			od_table->OverDriveTable.FanLinearTempPoints[i] =
					boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
			od_table->OverDriveTable.FanLinearPwmPoints[i] =
					boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
		}
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
		od_table->OverDriveTable.FanZeroRpmEnable =
					boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
		break;
	case PP_OD_EDIT_ACOUSTIC_LIMIT:
		od_table->OverDriveTable.AcousticLimitRpmThreshold =
					boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_ACOUSTIC_TARGET:
		od_table->OverDriveTable.AcousticTargetRpmThreshold =
					boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
		od_table->OverDriveTable.FanTargetTemperature =
					boot_overdrive_table->OverDriveTable.FanTargetTemperature;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_MINIMUM_PWM:
		od_table->OverDriveTable.FanMinimumPwm =
					boot_overdrive_table->OverDriveTable.FanMinimumPwm;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	default:
		dev_info(adev->dev, "Invalid table index: %ld\n", input);
		return -EINVAL;
	}

	return 0;
}
2452
smu_v14_0_2_od_edit_dpm_table(struct smu_context * smu,enum PP_OD_DPM_TABLE_COMMAND type,long input[],uint32_t size)2453 static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
2454 enum PP_OD_DPM_TABLE_COMMAND type,
2455 long input[],
2456 uint32_t size)
2457 {
2458 struct smu_table_context *table_context = &smu->smu_table;
2459 OverDriveTableExternal_t *od_table =
2460 (OverDriveTableExternal_t *)table_context->overdrive_table;
2461 struct amdgpu_device *adev = smu->adev;
2462 uint32_t offset_of_voltageoffset;
2463 int32_t minimum, maximum;
2464 uint32_t feature_ctrlmask;
2465 int i, ret = 0;
2466
2467 switch (type) {
2468 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2469 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
2470 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
2471 return -ENOTSUPP;
2472 }
2473
2474 if (size != 1) {
2475 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2476 return -EINVAL;
2477 }
2478
2479 smu_v14_0_2_get_od_setting_limits(smu,
2480 PP_OD_FEATURE_GFXCLK_FMAX,
2481 &minimum,
2482 &maximum);
2483 if (input[0] < minimum ||
2484 input[0] > maximum) {
2485 dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
2486 minimum, maximum);
2487 return -EINVAL;
2488 }
2489
2490 od_table->OverDriveTable.GfxclkFoffset = input[0];
2491 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
2492 break;
2493
2494 case PP_OD_EDIT_MCLK_VDDC_TABLE:
2495 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
2496 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
2497 return -ENOTSUPP;
2498 }
2499
2500 for (i = 0; i < size; i += 2) {
2501 if (i + 2 > size) {
2502 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2503 return -EINVAL;
2504 }
2505
2506 switch (input[i]) {
2507 case 0:
2508 smu_v14_0_2_get_od_setting_limits(smu,
2509 PP_OD_FEATURE_UCLK_FMIN,
2510 &minimum,
2511 &maximum);
2512 if (input[i + 1] < minimum ||
2513 input[i + 1] > maximum) {
2514 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
2515 input[i + 1], minimum, maximum);
2516 return -EINVAL;
2517 }
2518
2519 od_table->OverDriveTable.UclkFmin = input[i + 1];
2520 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2521 break;
2522
2523 case 1:
2524 smu_v14_0_2_get_od_setting_limits(smu,
2525 PP_OD_FEATURE_UCLK_FMAX,
2526 &minimum,
2527 &maximum);
2528 if (input[i + 1] < minimum ||
2529 input[i + 1] > maximum) {
2530 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
2531 input[i + 1], minimum, maximum);
2532 return -EINVAL;
2533 }
2534
2535 od_table->OverDriveTable.UclkFmax = input[i + 1];
2536 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2537 break;
2538
2539 default:
2540 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
2541 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
2542 return -EINVAL;
2543 }
2544 }
2545
2546 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
2547 dev_err(adev->dev,
2548 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
2549 (uint32_t)od_table->OverDriveTable.UclkFmin,
2550 (uint32_t)od_table->OverDriveTable.UclkFmax);
2551 return -EINVAL;
2552 }
2553 break;
2554
2555 case PP_OD_EDIT_VDDGFX_OFFSET:
2556 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
2557 dev_warn(adev->dev, "Gfx offset setting not supported!\n");
2558 return -ENOTSUPP;
2559 }
2560
2561 smu_v14_0_2_get_od_setting_limits(smu,
2562 PP_OD_FEATURE_GFX_VF_CURVE,
2563 &minimum,
2564 &maximum);
2565 if (input[0] < minimum ||
2566 input[0] > maximum) {
2567 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
2568 input[0], minimum, maximum);
2569 return -EINVAL;
2570 }
2571
2572 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
2573 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
2574 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
2575 break;
2576
2577 case PP_OD_EDIT_FAN_CURVE:
2578 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2579 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2580 return -ENOTSUPP;
2581 }
2582
2583 if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
2584 input[0] < 0)
2585 return -EINVAL;
2586
2587 smu_v14_0_2_get_od_setting_limits(smu,
2588 PP_OD_FEATURE_FAN_CURVE_TEMP,
2589 &minimum,
2590 &maximum);
2591 if (input[1] < minimum ||
2592 input[1] > maximum) {
2593 dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
2594 input[1], minimum, maximum);
2595 return -EINVAL;
2596 }
2597
2598 smu_v14_0_2_get_od_setting_limits(smu,
2599 PP_OD_FEATURE_FAN_CURVE_PWM,
2600 &minimum,
2601 &maximum);
2602 if (input[2] < minimum ||
2603 input[2] > maximum) {
2604 dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
2605 input[2], minimum, maximum);
2606 return -EINVAL;
2607 }
2608
2609 od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
2610 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
2611 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
2612 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2613 break;
2614
2615 case PP_OD_EDIT_ACOUSTIC_LIMIT:
2616 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2617 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2618 return -ENOTSUPP;
2619 }
2620
2621 smu_v14_0_2_get_od_setting_limits(smu,
2622 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
2623 &minimum,
2624 &maximum);
2625 if (input[0] < minimum ||
2626 input[0] > maximum) {
2627 dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
2628 input[0], minimum, maximum);
2629 return -EINVAL;
2630 }
2631
2632 od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
2633 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2634 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2635 break;
2636
2637 case PP_OD_EDIT_ACOUSTIC_TARGET:
2638 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2639 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2640 return -ENOTSUPP;
2641 }
2642
2643 smu_v14_0_2_get_od_setting_limits(smu,
2644 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
2645 &minimum,
2646 &maximum);
2647 if (input[0] < minimum ||
2648 input[0] > maximum) {
2649 dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
2650 input[0], minimum, maximum);
2651 return -EINVAL;
2652 }
2653
2654 od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
2655 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2656 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2657 break;
2658
2659 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
2660 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2661 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2662 return -ENOTSUPP;
2663 }
2664
2665 smu_v14_0_2_get_od_setting_limits(smu,
2666 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
2667 &minimum,
2668 &maximum);
2669 if (input[0] < minimum ||
2670 input[0] > maximum) {
2671 dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
2672 input[0], minimum, maximum);
2673 return -EINVAL;
2674 }
2675
2676 od_table->OverDriveTable.FanTargetTemperature = input[0];
2677 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2678 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2679 break;
2680
2681 case PP_OD_EDIT_FAN_MINIMUM_PWM:
2682 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2683 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2684 return -ENOTSUPP;
2685 }
2686
2687 smu_v14_0_2_get_od_setting_limits(smu,
2688 PP_OD_FEATURE_FAN_MINIMUM_PWM,
2689 &minimum,
2690 &maximum);
2691 if (input[0] < minimum ||
2692 input[0] > maximum) {
2693 dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
2694 input[0], minimum, maximum);
2695 return -EINVAL;
2696 }
2697
2698 od_table->OverDriveTable.FanMinimumPwm = input[0];
2699 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2700 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2701 break;
2702
2703 case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
2704 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
2705 dev_warn(adev->dev, "Zero RPM setting not supported!\n");
2706 return -ENOTSUPP;
2707 }
2708
2709 smu_v14_0_2_get_od_setting_limits(smu,
2710 PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
2711 &minimum,
2712 &maximum);
2713 if (input[0] < minimum ||
2714 input[0] > maximum) {
2715 dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
2716 input[0], minimum, maximum);
2717 return -EINVAL;
2718 }
2719
2720 od_table->OverDriveTable.FanZeroRpmEnable = input[0];
2721 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
2722 break;
2723
2724 case PP_OD_RESTORE_DEFAULT_TABLE:
2725 if (size == 1) {
2726 ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
2727 if (ret)
2728 return ret;
2729 } else {
2730 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
2731 memcpy(od_table,
2732 table_context->boot_overdrive_table,
2733 sizeof(OverDriveTableExternal_t));
2734 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
2735 }
2736 fallthrough;
2737 case PP_OD_COMMIT_DPM_TABLE:
2738 /*
2739 * The member below instructs PMFW the settings focused in
2740 * this single operation.
2741 * `uint32_t FeatureCtrlMask;`
2742 * It does not contain actual informations about user's custom
2743 * settings. Thus we do not cache it.
2744 */
2745 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
2746 if (memcmp((u8 *)od_table + offset_of_voltageoffset,
2747 table_context->user_overdrive_table + offset_of_voltageoffset,
2748 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
2749 smu_v14_0_2_dump_od_table(smu, od_table);
2750
2751 ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
2752 if (ret) {
2753 dev_err(adev->dev, "Failed to upload overdrive table!\n");
2754 return ret;
2755 }
2756
2757 od_table->OverDriveTable.FeatureCtrlMask = 0;
2758 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
2759 (u8 *)od_table + offset_of_voltageoffset,
2760 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
2761
2762 if (!memcmp(table_context->user_overdrive_table,
2763 table_context->boot_overdrive_table,
2764 sizeof(OverDriveTableExternal_t)))
2765 smu->user_dpm_profile.user_od = false;
2766 else
2767 smu->user_dpm_profile.user_od = true;
2768 }
2769 break;
2770
2771 default:
2772 return -ENOSYS;
2773 }
2774
2775 return ret;
2776 }
2777
/*
 * Apply a new sustained power (PPT) limit.
 *
 * Limits at or below the stock firmware ceiling (MsgLimits PPT0/AC) are
 * programmed directly through the generic SMU message path.  Anything
 * above the ceiling is expressed as a percentage boost via the overdrive
 * table, which requires overdrive to be enabled.
 *
 * Returns 0 on success, -EINVAL for an unsupported limit type or an
 * out-of-range request with OD disabled, or a negative error code from
 * the message/upload helpers.
 */
static int smu_v14_0_2_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	int ret;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (limit <= msg_limit) {
		/*
		 * Coming down from an OD-boosted limit: clear the OD PPT
		 * offset first so the plain message limit takes effect.
		 */
		if (smu->current_power_limit > msg_limit) {
			od_table->OverDriveTable.Ppt = 0;
			od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

			ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
			if (ret) {
				dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
				return ret;
			}
		}

		return smu_v14_0_set_power_limit(smu, limit_type, limit);
	}

	/* Requests above the stock ceiling need overdrive support. */
	if (!smu->od_enabled)
		return -EINVAL;

	/* Pin the message-path limit at the ceiling ... */
	ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit);
	if (ret)
		return ret;

	/* ... and express the excess as a percentage OD offset. */
	od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
	od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

	ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}
2825
/*
 * Dispatch table binding the common amdgpu SMU framework to this ASIC.
 * Entries point either at the v14.0.2-specific implementations in this
 * file (smu_v14_0_2_*) or at the shared IP-version helpers
 * (smu_v14_0_* / smu_cmn_*) where no ASIC-specific handling is needed.
 */
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
	/* feature/dpm table setup */
	.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
	/* i2c bus for EEPROM/RAS access */
	.i2c_init = smu_v14_0_2_i2c_control_init,
	.i2c_fini = smu_v14_0_2_i2c_control_fini,
	.is_dpm_running = smu_v14_0_2_is_dpm_running,
	/* firmware/microcode lifecycle */
	.init_microcode = smu_v14_0_init_microcode,
	.load_microcode = smu_v14_0_load_microcode,
	.fini_microcode = smu_v14_0_fini_microcode,
	/* SMC table and power state bring-up/teardown */
	.init_smc_tables = smu_v14_0_2_init_smc_tables,
	.fini_smc_tables = smu_v14_0_fini_smc_tables,
	.init_power = smu_v14_0_init_power,
	.fini_power = smu_v14_0_fini_power,
	.check_fw_status = smu_v14_0_check_fw_status,
	.setup_pptable = smu_v14_0_2_setup_pptable,
	.check_fw_version = smu_v14_0_check_fw_version,
	.set_driver_table_location = smu_v14_0_set_driver_table_location,
	.system_features_control = smu_v14_0_system_features_control,
	.set_allowed_mask = smu_v14_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	/* multimedia engine power gating */
	.dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
	/* clock/sensor queries and level control */
	.get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
	.read_sensor = smu_v14_0_2_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.print_clk_levels = smu_v14_0_2_print_clk_levels,
	.force_clk_levels = smu_v14_0_2_force_clk_levels,
	.update_pcie_parameters = smu_v14_0_2_update_pcie_parameters,
	/* thermal interrupt / alert plumbing */
	.get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range,
	.register_irq_handler = smu_v14_0_register_irq_handler,
	.enable_thermal_alert = smu_v14_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v14_0_disable_thermal_alert,
	.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
	/* overdrive (user custom tuning) support */
	.set_default_od_settings = smu_v14_0_2_set_default_od_settings,
	.restore_user_od_settings = smu_v14_0_2_restore_user_od_settings,
	.od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table,
	.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
	.set_performance_level = smu_v14_0_set_performance_level,
	.gfx_off_control = smu_v14_0_gfx_off_control,
	.get_unique_id = smu_v14_0_2_get_unique_id,
	/* fan and power limit control */
	.get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
	.get_power_limit = smu_v14_0_2_get_power_limit,
	.set_power_limit = smu_v14_0_2_set_power_limit,
	.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
	.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
	.run_btc = smu_v14_0_run_btc,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.set_tool_table_location = smu_v14_0_set_tool_table_location,
	.deep_sleep_control = smu_v14_0_deep_sleep_control,
	.gfx_ulv_control = smu_v14_0_gfx_ulv_control,
	/* BACO (bus-active, chip-off) runtime power management */
	.get_bamaco_support = smu_v14_0_get_bamaco_support,
	.baco_get_state = smu_v14_0_baco_get_state,
	.baco_set_state = smu_v14_0_baco_set_state,
	.baco_enter = smu_v14_0_2_baco_enter,
	.baco_exit = smu_v14_0_2_baco_exit,
	/* GPU reset paths */
	.mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported,
	.mode1_reset = smu_v14_0_2_mode1_reset,
	.mode2_reset = smu_v14_0_2_mode2_reset,
	.enable_gfx_features = smu_v14_0_2_enable_gfx_features,
	.set_mp1_state = smu_v14_0_2_set_mp1_state,
	.set_df_cstate = smu_v14_0_2_set_df_cstate,
#if 0
	.gpo_control = smu_v14_0_gpo_control,
#endif
};
2897
smu_v14_0_2_set_ppt_funcs(struct smu_context * smu)2898 void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
2899 {
2900 smu->ppt_funcs = &smu_v14_0_2_ppt_funcs;
2901 smu->message_map = smu_v14_0_2_message_map;
2902 smu->clock_map = smu_v14_0_2_clk_map;
2903 smu->feature_map = smu_v14_0_2_feature_mask_map;
2904 smu->table_map = smu_v14_0_2_table_map;
2905 smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
2906 smu->workload_map = smu_v14_0_2_workload_map;
2907 smu_v14_0_2_set_smu_mailbox_registers(smu);
2908 }
2909