1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include <linux/reboot.h>
30 #include "amd_shared.h"
31 #include "amd_powerplay.h"
32 #include "power_state.h"
33 #include "amdgpu.h"
34 #include "hwmgr.h"
35 #include "amdgpu_dpm_internal.h"
36 #include "amdgpu_display.h"
37
38 static const struct amd_pm_funcs pp_dpm_funcs;
39
/*
 * Allocate and initialize the powerplay hardware manager for @adev and
 * publish it (plus the dpm function table) in adev->powerplay.
 *
 * Returns 0 on success, -EINVAL for a NULL device, -ENOMEM on allocation
 * failure. Undone by amd_powerplay_destroy().
 */
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	/* not_vf is true on bare metal; VFs are detected via amdgpu_sriov_vf(). */
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->device = amdgpu_cgs_create_device(adev);
	if (!hwmgr->device) {
		kfree(hwmgr);
		return -ENOMEM;
	}

	mutex_init(&hwmgr->msg_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->pm.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}
68
69
amd_powerplay_destroy(struct amdgpu_device * adev)70 static void amd_powerplay_destroy(struct amdgpu_device *adev)
71 {
72 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
73
74 mutex_destroy(&hwmgr->msg_lock);
75
76 kfree(hwmgr->hardcode_pp_table);
77 hwmgr->hardcode_pp_table = NULL;
78
79 kfree(hwmgr);
80 hwmgr = NULL;
81 }
82
/*
 * IP-block early_init: create the hwmgr and run the chip-specific early
 * setup. Note hwmgr_early_init() failures are reported as -EINVAL,
 * discarding the original error code.
 */
static int pp_early_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;
	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}
98
/*
 * Delayed-work handler for SW CTF (software critical temperature fault).
 * NOTE(review): presumably queued from the thermal interrupt path with a
 * grace delay — confirm at the swctf_delayed_work scheduler. After the
 * delay, re-read the temperature and only power off if it is still above
 * the SW CTF threshold.
 */
static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
				&adev->pm.dpm.thermal;
	/* Temperature is reported in millidegrees; threshold is in degrees. */
	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * For some legacy ASICs, hotspot temperature retrieving might be not
		 * supported. Check the edge temperature instead then.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		/* Temperature recovered below the threshold: cancel the shutdown. */
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}
137
/*
 * IP-block sw_init: software-side hwmgr setup; on success also prepare
 * the SW CTF delayed work used by pp_swctf_delayed_work_handler().
 */
static int pp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");

	if (!ret)
		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
				  pp_swctf_delayed_work_handler);

	return ret;
}
154
/*
 * IP-block sw_fini: undo hwmgr_sw_init() and release the SMU firmware
 * image held in adev->pm.fw.
 */
static int pp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	amdgpu_ucode_release(&adev->pm.fw);

	return 0;
}
166
pp_hw_init(struct amdgpu_ip_block * ip_block)167 static int pp_hw_init(struct amdgpu_ip_block *ip_block)
168 {
169 int ret = 0;
170 struct amdgpu_device *adev = ip_block->adev;
171 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
172
173 ret = hwmgr_hw_init(hwmgr);
174
175 if (ret)
176 pr_err("powerplay hw init failed\n");
177
178 return ret;
179 }
180
/*
 * IP-block hw_fini: make sure no SW CTF shutdown work is in flight,
 * then tear down the hardware state.
 */
static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	hwmgr_hw_fini(hwmgr);

	return 0;
}
191
/*
 * Reserve a private buffer for the SMU and tell it the CPU/GPU addresses.
 * NOTE(review): despite the "vram" name, the buffer is created in the GTT
 * domain (AMDGPU_GEM_DOMAIN_GTT) — confirm which is intended.
 * Best-effort: on failure the buffer is freed and only an error is logged.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* r stays -EINVAL (and the buffer is released) if the hook is absent. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
222
/*
 * IP-block late_init: complete dpm initialization once all IPs are up,
 * and reserve the SMU private buffer if one was requested.
 */
static int pp_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en)
		hwmgr_handle_task(hwmgr,
					AMD_PP_TASK_COMPLETE_INIT, NULL);
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}
236
/*
 * IP-block late_fini: release the SMU private buffer (if reserved in
 * pp_late_init()) and destroy the hwmgr created in pp_early_init().
 */
static void pp_late_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}
245
246
/* Powerplay never reports itself idle; unconditionally signal "busy". */
static bool pp_is_idle(void *handle)
{
	(void)handle;

	return false;
}
251
/* IP-block hook stub: powergating state changes are a no-op here. */
static int pp_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	return 0;
}
257
/*
 * IP-block suspend: flush any pending SW CTF shutdown work before
 * suspending the hwmgr.
 */
static int pp_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	return hwmgr_suspend(hwmgr);
}
267
pp_resume(struct amdgpu_ip_block * ip_block)268 static int pp_resume(struct amdgpu_ip_block *ip_block)
269 {
270 struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;
271
272 return hwmgr_resume(hwmgr);
273 }
274
/* IP-block hook stub: clockgating state changes are a no-op here. */
static int pp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	return 0;
}
280
/* IP-block callback table wiring powerplay into the amdgpu IP lifecycle. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
296
/* Exported SMC IP block version descriptor registered with amdgpu. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
305
/* This interface only be supported On Vi,
 * because only smu7/8 can help to load gfx/sdma fw,
 * smu need to be enabled before load other ip's fw.
 * so call start smu to load smu7 fw and other ip's fw
 *
 * Returns 0 on success, -EINVAL if the SMU hooks are absent or start fails.
 */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}
325
/* Firmware-loading-complete notification: nothing to do for powerplay. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
330
/*
 * Forward a clockgating message id to the chip-specific handler.
 * Returns 0 (after a rate-limited note) when the hook is unimplemented.
 */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
345
/*
 * Track entry/exit of UMD (user-mode driver) pstate based on the requested
 * forced level. On entry the current level is saved; on PROFILE_EXIT the
 * saved level is restored through *level. May rewrite *level in place.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}
369
/*
 * Set a forced dpm performance level. A no-op if the level is already
 * active; otherwise update UMD-pstate bookkeeping and trigger a power
 * state readjust.
 */
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	/* May rewrite 'level' (e.g. PROFILE_EXIT -> saved level). */
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);

	return 0;
}
387
/*
 * Report the current forced dpm level. On an invalid handle -EINVAL is
 * returned cast into the enum (callers treat negative values as errors).
 */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr->dpm_level;
}
398
pp_dpm_get_sclk(void * handle,bool low)399 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
400 {
401 struct pp_hwmgr *hwmgr = handle;
402
403 if (!hwmgr || !hwmgr->pm_en)
404 return 0;
405
406 if (hwmgr->hwmgr_func->get_sclk == NULL) {
407 pr_info_ratelimited("%s was not implemented.\n", __func__);
408 return 0;
409 }
410 return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
411 }
412
pp_dpm_get_mclk(void * handle,bool low)413 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
414 {
415 struct pp_hwmgr *hwmgr = handle;
416
417 if (!hwmgr || !hwmgr->pm_en)
418 return 0;
419
420 if (hwmgr->hwmgr_func->get_mclk == NULL) {
421 pr_info_ratelimited("%s was not implemented.\n", __func__);
422 return 0;
423 }
424 return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
425 }
426
/* Gate (@gate true) or ungate VCE power via the chip-specific hook. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_vce) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}
440
/* Gate (@gate true) or ungate UVD power via the chip-specific hook. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_uvd) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}
454
/* Dispatch a powerplay task (see enum amd_pp_task) to the hwmgr. */
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr_handle_task(hwmgr, task_id, user_state);
}
465
/*
 * Map the current power state's UI classification label to the generic
 * amd_pm_state_type. States without a known label fall back to BOOT (if
 * flagged as the boot state) or DEFAULT.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}

	return pm_type;
}
497
/*
 * Set the fan control mode (auto/manual etc., chip-defined values).
 * U32_MAX is rejected as an invalid sentinel.
 */
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}
515
/* Read the current fan control mode into *fan_mode. */
static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	return 0;
}
532
/* Set fan speed as a PWM duty value; U32_MAX is an invalid sentinel. */
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
548
/* Read the current fan PWM duty value into *speed. */
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
564
/* Read the current fan speed in RPM into *rpm. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
580
/* Set the fan speed in RPM; U32_MAX is an invalid sentinel. */
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
596
pp_dpm_get_pp_num_states(void * handle,struct pp_states_info * data)597 static int pp_dpm_get_pp_num_states(void *handle,
598 struct pp_states_info *data)
599 {
600 struct pp_hwmgr *hwmgr = handle;
601 int i;
602
603 memset(data, 0, sizeof(*data));
604
605 if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
606 return -EINVAL;
607
608 data->nums = hwmgr->num_ps;
609
610 for (i = 0; i < hwmgr->num_ps; i++) {
611 struct pp_power_state *state = (struct pp_power_state *)
612 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
613 switch (state->classification.ui_label) {
614 case PP_StateUILabel_Battery:
615 data->states[i] = POWER_STATE_TYPE_BATTERY;
616 break;
617 case PP_StateUILabel_Balanced:
618 data->states[i] = POWER_STATE_TYPE_BALANCED;
619 break;
620 case PP_StateUILabel_Performance:
621 data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
622 break;
623 default:
624 if (state->classification.flags & PP_StateClassificationFlag_Boot)
625 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
626 else
627 data->states[i] = POWER_STATE_TYPE_DEFAULT;
628 }
629 }
630 return 0;
631 }
632
/*
 * Expose the active soft pp table. *table points at internal storage
 * (no copy; caller must not free). Returns the table size in bytes.
 */
static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
		return -EINVAL;

	*table = (char *)hwmgr->soft_pp_table;
	return hwmgr->soft_pp_table_size;
}
643
/*
 * Full powerplay reinitialization: hw teardown, hw bring-up, then the
 * COMPLETE_INIT task. Used after the pp table is replaced at runtime.
 */
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
659
/*
 * Install a caller-supplied pp table and reinitialize powerplay with it.
 *
 * Fix: the hardcode table is allocated with soft_pp_table_size bytes, but
 * the original memcpy()'d the caller's @size unchecked — an oversized
 * blob overflowed the heap allocation. Validate @buf/@size first.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!buf || size > hwmgr->soft_pp_table_size)
		return -EINVAL;

	/* First override: snapshot the stock table so it can be edited. */
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	/* Switch the live table to the user-modified copy. */
	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	/* Disable AVFS while a hand-edited table is in use. */
	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}
689
/*
 * Force a set of clock levels (bitmask) for the given clock type.
 * Only permitted while the dpm level is MANUAL.
 */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
710
/*
 * sysfs-style clock level dump appended at buf+*offset (the emit variant
 * of print_clock_levels). -ENOENT when the hook is absent.
 */
static int pp_dpm_emit_clock_levels(void *handle,
				    enum pp_clock_type type,
				    char *buf,
				    int *offset)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->emit_clock_levels)
		return -ENOENT;

	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
}
726
/* Legacy sysfs clock level dump into buf; returns bytes written. */
static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
741
pp_dpm_get_sclk_od(void * handle)742 static int pp_dpm_get_sclk_od(void *handle)
743 {
744 struct pp_hwmgr *hwmgr = handle;
745
746 if (!hwmgr || !hwmgr->pm_en)
747 return -EINVAL;
748
749 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
750 pr_info_ratelimited("%s was not implemented.\n", __func__);
751 return 0;
752 }
753 return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
754 }
755
/* Set the sclk overdrive percentage; no-op (0) when unsupported. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_sclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}
770
pp_dpm_get_mclk_od(void * handle)771 static int pp_dpm_get_mclk_od(void *handle)
772 {
773 struct pp_hwmgr *hwmgr = handle;
774
775 if (!hwmgr || !hwmgr->pm_en)
776 return -EINVAL;
777
778 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
779 pr_info_ratelimited("%s was not implemented.\n", __func__);
780 return 0;
781 }
782 return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
783 }
784
/* Set the mclk overdrive percentage; no-op (0) when unsupported. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_mclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
798
/*
 * Read a sensor value into *value. Pstate clocks are reported here
 * directly (stored in MHz/100 units, hence the *100); everything else is
 * forwarded to the chip-specific read_sensor hook.
 * NOTE(review): unlike siblings, the hook is not NULL-checked before the
 * default-case call — confirm every hwmgr_func table provides read_sensor.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}
830
/*
 * Return a pointer into the internal VCE clock state table for @idx,
 * or NULL when powerplay is off or @idx is out of range.
 */
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}
843
/* Print the available/current power profile modes into buf. */
static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}
855
/*
 * Apply a power profile mode described by @input (size words).
 * Only permitted while the dpm level is MANUAL.
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
870
/* Fine-grain clock/voltage adjustment; silently a no-op when unsupported. */
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}
883
/* Edit the overdrive (ODN) dpm table; no-op (0) when unimplemented. */
static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
899
/*
 * Set the MP1 (SMU microcontroller) state. Unlike most entry points,
 * pm_en being off is NOT an error here — teardown paths call this.
 */
static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->set_mp1_state)
		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);

	return 0;
}
915
/*
 * Enable/disable a workload power profile. Profiles are reference-counted
 * via workload_mask, indexed by priority ("workload_prority" is the actual
 * field spelling); the highest-priority set bit wins and its workload
 * setting is applied unless the user is in MANUAL dpm mode.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* CUSTOM (and beyond) cannot be switched through this interface. */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		/* Mask may now be empty, hence the extra index > 0 guard. */
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
				return -EINVAL;
	}

	/* Manual mode owns the profile; don't override the user's choice. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
}
957
/*
 * Set the sustained power limit in the SMU. limit == 0 restores the
 * default; the cap is default_power_limit, stretched by TDPODLimit percent
 * when overdrive is enabled.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	/* Cache the accepted value for pp_get_power_limit(). */
	hwmgr->power_limit = limit;
	return 0;
}
987
/*
 * Report a power limit: current, default, max (default scaled by the
 * overdrive TDPODLimit percentage) or min (always 0). Only the sustained
 * power type is supported.
 */
static int pp_get_power_limit(void *handle, uint32_t *limit,
		enum pp_power_limit_level pp_limit_level,
		enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	switch (pp_limit_level) {
		case PP_PWR_LIMIT_CURRENT:
			*limit = hwmgr->power_limit;
			break;
		case PP_PWR_LIMIT_DEFAULT:
			*limit = hwmgr->default_power_limit;
			break;
		case PP_PWR_LIMIT_MAX:
			*limit = hwmgr->default_power_limit;
			if (hwmgr->od_enabled) {
				*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
				*limit /= 100;
			}
			break;
		case PP_PWR_LIMIT_MIN:
			*limit = 0;
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
	}

	return ret;
}
1025
/* Store the latest DAL display configuration for later dpm decisions. */
static int pp_display_configuration_change(void *handle,
	const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_store_dal_configuration_data(hwmgr, display_config);
	return 0;
}
1037
/* Query the simple clock info / power level DAL should assume. */
static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !output)
		return -EINVAL;

	return phm_get_dal_power_level(hwmgr, output);
}
1048
/*
 * Fill @clocks with the current engine/memory clock ranges and bus
 * bandwidth of the active power state, plus the max DAL clock state.
 * Shallow-sleep engine clocks are substituted when available.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	/* Pick the designation matching the power-containment capability. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* level 0 means "unknown" — report the deepest DAL power level. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}
1096
pp_get_clock_by_type(void * handle,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1097 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1098 {
1099 struct pp_hwmgr *hwmgr = handle;
1100
1101 if (!hwmgr || !hwmgr->pm_en)
1102 return -EINVAL;
1103
1104 if (clocks == NULL)
1105 return -EINVAL;
1106
1107 return phm_get_clock_by_type(hwmgr, type, clocks);
1108 }
1109
pp_get_clock_by_type_with_latency(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1110 static int pp_get_clock_by_type_with_latency(void *handle,
1111 enum amd_pp_clock_type type,
1112 struct pp_clock_levels_with_latency *clocks)
1113 {
1114 struct pp_hwmgr *hwmgr = handle;
1115
1116 if (!hwmgr || !hwmgr->pm_en || !clocks)
1117 return -EINVAL;
1118
1119 return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1120 }
1121
pp_get_clock_by_type_with_voltage(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)1122 static int pp_get_clock_by_type_with_voltage(void *handle,
1123 enum amd_pp_clock_type type,
1124 struct pp_clock_levels_with_voltage *clocks)
1125 {
1126 struct pp_hwmgr *hwmgr = handle;
1127
1128 if (!hwmgr || !hwmgr->pm_en || !clocks)
1129 return -EINVAL;
1130
1131 return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1132 }
1133
pp_set_watermarks_for_clocks_ranges(void * handle,void * clock_ranges)1134 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1135 void *clock_ranges)
1136 {
1137 struct pp_hwmgr *hwmgr = handle;
1138
1139 if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1140 return -EINVAL;
1141
1142 return phm_set_watermarks_for_clocks_ranges(hwmgr,
1143 clock_ranges);
1144 }
1145
pp_display_clock_voltage_request(void * handle,struct pp_display_clock_request * clock)1146 static int pp_display_clock_voltage_request(void *handle,
1147 struct pp_display_clock_request *clock)
1148 {
1149 struct pp_hwmgr *hwmgr = handle;
1150
1151 if (!hwmgr || !hwmgr->pm_en || !clock)
1152 return -EINVAL;
1153
1154 return phm_display_clock_voltage_request(hwmgr, clock);
1155 }
1156
pp_get_display_mode_validation_clocks(void * handle,struct amd_pp_simple_clock_info * clocks)1157 static int pp_get_display_mode_validation_clocks(void *handle,
1158 struct amd_pp_simple_clock_info *clocks)
1159 {
1160 struct pp_hwmgr *hwmgr = handle;
1161 int ret = 0;
1162
1163 if (!hwmgr || !hwmgr->pm_en || !clocks)
1164 return -EINVAL;
1165
1166 clocks->level = PP_DAL_POWERLEVEL_7;
1167
1168 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1169 ret = phm_get_max_high_clocks(hwmgr, clocks);
1170
1171 return ret;
1172 }
1173
pp_dpm_powergate_mmhub(void * handle)1174 static int pp_dpm_powergate_mmhub(void *handle)
1175 {
1176 struct pp_hwmgr *hwmgr = handle;
1177
1178 if (!hwmgr || !hwmgr->pm_en)
1179 return -EINVAL;
1180
1181 if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1182 pr_info_ratelimited("%s was not implemented.\n", __func__);
1183 return 0;
1184 }
1185
1186 return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1187 }
1188
/*
 * Gate or ungate the GFX block.  Note: reports success (0) when power
 * management is disabled, unlike most siblings which return -EINVAL.
 */
static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->powergate_gfx) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}
1203
/* Gate or ungate the ACP (audio co-processor) block, if supported. */
static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_acp) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}
1218
/*
 * Gate or ungate the SDMA block, if supported.  Deliberately does not
 * require pm_en, matching the original behavior.
 */
static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL)
		return;

	if (!hwmgr->hwmgr_func->powergate_sdma) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}
1233
/*
 * amd_pm_funcs.set_powergating_by_smu entry point: route a powergating
 * request for an IP block to the matching per-IP helper above.
 *
 * @handle: powerplay handle (struct pp_hwmgr *)
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block
 * @gate: true to powergate, false to ungate
 * @inst: instance index; unused by this legacy powerplay path
 *
 * Returns 0, except for GFX where the helper's status is propagated.
 * Unknown block types are silently ignored (returns 0).
 */
static int pp_set_powergating_by_smu(void *handle,
		uint32_t block_type,
		bool gate,
		int inst)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		/*
		 * For now, this is only used on PICASSO.
		 * And only "gate" operation is supported.
		 */
		if (gate)
			pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = pp_dpm_powergate_gfx(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}
1271
pp_notify_smu_enable_pwe(void * handle)1272 static int pp_notify_smu_enable_pwe(void *handle)
1273 {
1274 struct pp_hwmgr *hwmgr = handle;
1275
1276 if (!hwmgr || !hwmgr->pm_en)
1277 return -EINVAL;
1278
1279 if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1280 pr_info_ratelimited("%s was not implemented.\n", __func__);
1281 return -EINVAL;
1282 }
1283
1284 hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1285
1286 return 0;
1287 }
1288
pp_enable_mgpu_fan_boost(void * handle)1289 static int pp_enable_mgpu_fan_boost(void *handle)
1290 {
1291 struct pp_hwmgr *hwmgr = handle;
1292
1293 if (!hwmgr)
1294 return -EINVAL;
1295
1296 if (!hwmgr->pm_en ||
1297 hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1298 return 0;
1299
1300 hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1301
1302 return 0;
1303 }
1304
/* Set the minimum deep-sleep DCEF clock, if the ASIC supports it. */
static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);

	return 0;
}
1321
/* Set a hard minimum DCEF clock frequency, if the ASIC supports it. */
static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);

	return 0;
}
1338
/* Set a hard minimum fabric clock frequency, if the ASIC supports it. */
static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);

	return 0;
}
1355
/* Tell the hwmgr how many displays are currently active. */
static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	return phm_set_active_display_count(hwmgr, count);
}
1365
pp_get_asic_baco_capability(void * handle)1366 static int pp_get_asic_baco_capability(void *handle)
1367 {
1368 struct pp_hwmgr *hwmgr = handle;
1369
1370 if (!hwmgr)
1371 return false;
1372
1373 if (!(hwmgr->not_vf && amdgpu_dpm) ||
1374 !hwmgr->hwmgr_func->get_bamaco_support)
1375 return false;
1376
1377 return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
1378 }
1379
pp_get_asic_baco_state(void * handle,int * state)1380 static int pp_get_asic_baco_state(void *handle, int *state)
1381 {
1382 struct pp_hwmgr *hwmgr = handle;
1383
1384 if (!hwmgr)
1385 return -EINVAL;
1386
1387 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1388 return 0;
1389
1390 hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1391
1392 return 0;
1393 }
1394
pp_set_asic_baco_state(void * handle,int state)1395 static int pp_set_asic_baco_state(void *handle, int state)
1396 {
1397 struct pp_hwmgr *hwmgr = handle;
1398
1399 if (!hwmgr)
1400 return -EINVAL;
1401
1402 if (!(hwmgr->not_vf && amdgpu_dpm) ||
1403 !hwmgr->hwmgr_func->set_asic_baco_state)
1404 return 0;
1405
1406 hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1407
1408 return 0;
1409 }
1410
pp_get_ppfeature_status(void * handle,char * buf)1411 static int pp_get_ppfeature_status(void *handle, char *buf)
1412 {
1413 struct pp_hwmgr *hwmgr = handle;
1414
1415 if (!hwmgr || !hwmgr->pm_en || !buf)
1416 return -EINVAL;
1417
1418 if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1419 pr_info_ratelimited("%s was not implemented.\n", __func__);
1420 return -EINVAL;
1421 }
1422
1423 return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1424 }
1425
/* Apply a new powerplay feature mask (sysfs write). */
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_ppfeature_status) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}
1440
pp_asic_reset_mode_2(void * handle)1441 static int pp_asic_reset_mode_2(void *handle)
1442 {
1443 struct pp_hwmgr *hwmgr = handle;
1444
1445 if (!hwmgr || !hwmgr->pm_en)
1446 return -EINVAL;
1447
1448 if (hwmgr->hwmgr_func->asic_reset == NULL) {
1449 pr_info_ratelimited("%s was not implemented.\n", __func__);
1450 return -EINVAL;
1451 }
1452
1453 return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1454 }
1455
/* Acquire or release the SMU-arbitrated I2C bus. */
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->smu_i2c_bus_access) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}
1470
pp_set_df_cstate(void * handle,enum pp_df_cstate state)1471 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1472 {
1473 struct pp_hwmgr *hwmgr = handle;
1474
1475 if (!hwmgr)
1476 return -EINVAL;
1477
1478 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1479 return 0;
1480
1481 hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1482
1483 return 0;
1484 }
1485
/* Set the XGMI link p-state; no-op success when unsupported. */
static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL)
		return -EINVAL;

	if (hwmgr->pm_en && hwmgr->hwmgr_func->set_xgmi_pstate)
		hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);

	return 0;
}
1500
pp_get_gpu_metrics(void * handle,void ** table)1501 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1502 {
1503 struct pp_hwmgr *hwmgr = handle;
1504
1505 if (!hwmgr)
1506 return -EINVAL;
1507
1508 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1509 return -EOPNOTSUPP;
1510
1511 return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1512 }
1513
/* Notify the backend of a GFX state change (e.g. for s0ix entry). */
static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->gfx_state_change) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);

	return 0;
}
1529
pp_get_prv_buffer_details(void * handle,void ** addr,size_t * size)1530 static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1531 {
1532 struct pp_hwmgr *hwmgr = handle;
1533 struct amdgpu_device *adev = hwmgr->adev;
1534 int err;
1535
1536 if (!addr || !size)
1537 return -EINVAL;
1538
1539 *addr = NULL;
1540 *size = 0;
1541 if (adev->pm.smu_prv_buffer) {
1542 err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1543 if (err)
1544 return err;
1545 *size = adev->pm.smu_prv_buffer_size;
1546 }
1547
1548 return 0;
1549 }
1550
pp_pm_compute_clocks(void * handle)1551 static void pp_pm_compute_clocks(void *handle)
1552 {
1553 struct pp_hwmgr *hwmgr = handle;
1554 struct amdgpu_device *adev = hwmgr->adev;
1555
1556 if (!adev->dc_enabled) {
1557 amdgpu_dpm_get_active_displays(adev);
1558 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1559 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1560 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1561 /* we have issues with mclk switching with
1562 * refresh rates over 120 hz on the non-DC code.
1563 */
1564 if (adev->pm.pm_display_cfg.vrefresh > 120)
1565 adev->pm.pm_display_cfg.min_vblank_time = 0;
1566
1567 pp_display_configuration_change(handle,
1568 &adev->pm.pm_display_cfg);
1569 }
1570
1571 pp_dpm_dispatch_tasks(handle,
1572 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1573 NULL);
1574 }
1575
/*
 * Legacy powerplay implementation of the amd_pm_funcs dispatch table.
 * amdgpu's dpm layer calls through these entries with the pp_hwmgr
 * handle returned by amd_powerplay_create().
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};
1643