Lines Matching full:pc (drivers/gpu/drm/xe/xe_guc_pc.c)
58 * DOC: GuC Power Conservation (PC)
60 * GuC Power Conservation (PC) supports multiple features for the most
78 * Render-C states is also a GuC PC feature that is now enabled in Xe for
83 static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc) in pc_to_guc() argument
85 return container_of(pc, struct xe_guc, pc); in pc_to_guc()
88 static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc) in pc_to_ct() argument
90 return &pc_to_guc(pc)->ct; in pc_to_ct()
93 static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc) in pc_to_gt() argument
95 return guc_to_gt(pc_to_guc(pc)); in pc_to_gt()
98 static struct xe_device *pc_to_xe(struct xe_guc_pc *pc) in pc_to_xe() argument
100 return guc_to_xe(pc_to_guc(pc)); in pc_to_xe()
103 static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc) in pc_to_maps() argument
105 return &pc->bo->vmap; in pc_to_maps()
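The pc_to_*() helpers above recover the enclosing objects from the embedded struct xe_guc_pc: pc_to_guc() uses container_of(), and most of the others chain through it. Below is a minimal, standalone sketch of that container_of() pattern using toy structure names (not the real Xe types), compilable on its own:

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the real Xe structures; names are illustrative only. */
struct toy_pc { int dummy; };
struct toy_guc { int id; struct toy_pc pc; };

/* Simplified container_of(): recover the parent from an embedded member. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct toy_guc *toy_pc_to_guc(struct toy_pc *pc)
{
        return container_of(pc, struct toy_guc, pc);
}

int main(void)
{
        struct toy_guc guc = { .id = 42 };

        printf("%d\n", toy_pc_to_guc(&guc.pc)->id);     /* prints 42 */
        return 0;
}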
120 static int wait_for_pc_state(struct xe_guc_pc *pc, in wait_for_pc_state() argument
127 xe_device_assert_mem_access(pc_to_xe(pc)); in wait_for_pc_state()
130 if (slpc_shared_data_read(pc, header.global_state) == state) in wait_for_pc_state()
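wait_for_pc_state() polls header.global_state in the SLPC shared data until it reaches the requested state, after asserting that device memory is accessible. The matched lines do not show the timeout handling, so the sketch below only illustrates the bounded-poll shape; the accessor, step and budget are made-up stand-ins, not the driver's real values:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for slpc_shared_data_read(pc, header.global_state). */
static unsigned int fake_global_state;

/* Bounded poll: 0 once @state is observed, -ETIMEDOUT when the budget runs out. */
static int wait_for_state(unsigned int state, unsigned int budget)
{
        for (unsigned int i = 0; i < budget; i++) {
                if (fake_global_state == state)
                        return 0;
                /* the real code waits/relaxes between reads */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        fake_global_state = 1;                  /* pretend SLPC reports "running" */
        printf("%d\n", wait_for_state(1, 100)); /* 0 */
        printf("%d\n", wait_for_state(2, 100)); /* negative: -ETIMEDOUT */
        return 0;
}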
143 static int pc_action_reset(struct xe_guc_pc *pc) in pc_action_reset() argument
145 struct xe_guc_ct *ct = pc_to_ct(pc); in pc_action_reset()
149 xe_bo_ggtt_addr(pc->bo), in pc_action_reset()
156 xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n", in pc_action_reset()
162 static int pc_action_query_task_state(struct xe_guc_pc *pc) in pc_action_query_task_state() argument
164 struct xe_guc_ct *ct = pc_to_ct(pc); in pc_action_query_task_state()
168 xe_bo_ggtt_addr(pc->bo), in pc_action_query_task_state()
173 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, in pc_action_query_task_state()
180 xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n", in pc_action_query_task_state()
186 static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value) in pc_action_set_param() argument
188 struct xe_guc_ct *ct = pc_to_ct(pc); in pc_action_set_param()
197 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, in pc_action_set_param()
203 xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n", in pc_action_set_param()
209 static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id) in pc_action_unset_param() argument
216 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; in pc_action_unset_param()
219 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, in pc_action_unset_param()
225 xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe", in pc_action_unset_param()
231 static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode) in pc_action_setup_gucrc() argument
233 struct xe_guc_ct *ct = pc_to_ct(pc); in pc_action_setup_gucrc()
242 xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n", in pc_action_setup_gucrc()
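Each pc_action_*() helper above follows the same shape: build a small u32 action buffer (the SLPC request plus its arguments, such as the GGTT address of the shared-data BO) and submit it through the CT channel obtained via pc_to_ct(), logging through xe_gt_err() on failure. The toy sketch below mirrors only that build-and-send shape; the action ID and dword layout are invented for illustration and are not the real GuC ABI:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Fake "CT send"; the real driver queues the buffer to the GuC CT channel. */
static int fake_ct_send(const uint32_t *action, unsigned int len)
{
        for (unsigned int i = 0; i < len; i++)
                printf("dw%u = 0x%08" PRIx32 "\n", i, action[i]);
        return 0;       /* pretend the firmware accepted it */
}

static int action_set_param_sketch(uint32_t shared_data_ggtt_addr,
                                   uint8_t id, uint32_t value)
{
        uint32_t action[] = {
                0x1234,                 /* hypothetical "SLPC request" action ID */
                shared_data_ggtt_addr,  /* where the firmware finds shared data */
                id,                     /* which SLPC parameter */
                value,                  /* new value */
        };

        return fake_ct_send(action, sizeof(action) / sizeof(action[0]));
}

int main(void)
{
        return action_set_param_sketch(0x00100000, 3, 1);
}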
259 static u32 pc_get_min_freq(struct xe_guc_pc *pc) in pc_get_min_freq() argument
264 slpc_shared_data_read(pc, task_state_data.freq)); in pc_get_min_freq()
269 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable) in pc_set_manual_rp_ctrl() argument
271 struct xe_gt *gt = pc_to_gt(pc); in pc_set_manual_rp_ctrl()
278 static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq) in pc_set_cur_freq() argument
280 struct xe_gt *gt = pc_to_gt(pc); in pc_set_cur_freq()
283 pc_set_manual_rp_ctrl(pc, true); in pc_set_cur_freq()
292 pc_set_manual_rp_ctrl(pc, false); in pc_set_cur_freq()
295 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) in pc_set_min_freq() argument
301 if (freq < pc->rpn_freq || freq > pc->rp0_freq) in pc_set_min_freq()
308 pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY, in pc_set_min_freq()
309 freq < pc->rpe_freq); in pc_set_min_freq()
311 return pc_action_set_param(pc, in pc_set_min_freq()
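pc_set_min_freq() rejects requests outside the fused [rpn_freq, rp0_freq] range and, before programming the new minimum, tells SLPC to ignore the efficient frequency whenever the requested minimum drops below rpe_freq. A standalone sketch of just that decision, with made-up RP values in MHz:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative RP values in MHz, not real fuse data. */
static const unsigned int rpn = 300, rpe = 1300, rp0 = 2400;

/* Mirrors the checks visible in pc_set_min_freq(): range-check the request
 * and flag "ignore efficient frequency" when min < RPe. */
static int check_min_freq(unsigned int freq, bool *ignore_efficient)
{
        if (freq < rpn || freq > rp0)
                return -1;      /* the driver returns an error code here */
        *ignore_efficient = freq < rpe;
        return 0;
}

int main(void)
{
        bool ignore;

        if (!check_min_freq(500, &ignore))
                printf("min=500 ok, ignore_efficient=%d\n", ignore);    /* 1 */
        if (!check_min_freq(1500, &ignore))
                printf("min=1500 ok, ignore_efficient=%d\n", ignore);   /* 0 */
        return 0;
}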
316 static int pc_get_max_freq(struct xe_guc_pc *pc) in pc_get_max_freq() argument
321 slpc_shared_data_read(pc, task_state_data.freq)); in pc_get_max_freq()
326 static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) in pc_set_max_freq() argument
333 if (freq < pc->rpn_freq || freq > pc->rp0_freq) in pc_set_max_freq()
336 return pc_action_set_param(pc, in pc_set_max_freq()
341 static void mtl_update_rpa_value(struct xe_guc_pc *pc) in mtl_update_rpa_value() argument
343 struct xe_gt *gt = pc_to_gt(pc); in mtl_update_rpa_value()
351 pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg)); in mtl_update_rpa_value()
354 static void mtl_update_rpe_value(struct xe_guc_pc *pc) in mtl_update_rpe_value() argument
356 struct xe_gt *gt = pc_to_gt(pc); in mtl_update_rpe_value()
364 pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg)); in mtl_update_rpe_value()
367 static void tgl_update_rpa_value(struct xe_guc_pc *pc) in tgl_update_rpa_value() argument
369 struct xe_gt *gt = pc_to_gt(pc); in tgl_update_rpa_value()
383 pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER; in tgl_update_rpa_value()
386 static void tgl_update_rpe_value(struct xe_guc_pc *pc) in tgl_update_rpe_value() argument
388 struct xe_gt *gt = pc_to_gt(pc); in tgl_update_rpe_value()
402 pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER; in tgl_update_rpe_value()
405 static void pc_update_rp_values(struct xe_guc_pc *pc) in pc_update_rp_values() argument
407 struct xe_gt *gt = pc_to_gt(pc); in pc_update_rp_values()
411 mtl_update_rpa_value(pc); in pc_update_rp_values()
412 mtl_update_rpe_value(pc); in pc_update_rp_values()
414 tgl_update_rpa_value(pc); in pc_update_rp_values()
415 tgl_update_rpe_value(pc); in pc_update_rp_values()
423 pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq); in pc_update_rp_values()
428 * @pc: The GuC PC
432 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_act_freq() argument
434 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_get_act_freq()
463 * @pc: The GuC PC
467 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
469 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) in xe_guc_pc_get_cur_freq() argument
471 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_get_cur_freq()
492 * @pc: The GuC PC
496 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_rp0_freq() argument
498 return pc->rp0_freq; in xe_guc_pc_get_rp0_freq()
503 * @pc: The GuC PC
507 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_rpa_freq() argument
509 pc_update_rp_values(pc); in xe_guc_pc_get_rpa_freq()
511 return pc->rpa_freq; in xe_guc_pc_get_rpa_freq()
516 * @pc: The GuC PC
520 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_rpe_freq() argument
522 pc_update_rp_values(pc); in xe_guc_pc_get_rpe_freq()
524 return pc->rpe_freq; in xe_guc_pc_get_rpe_freq()
529 * @pc: The GuC PC
533 u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_rpn_freq() argument
535 return pc->rpn_freq; in xe_guc_pc_get_rpn_freq()
540 * @pc: The GuC PC
544 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
546 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) in xe_guc_pc_get_min_freq() argument
550 xe_device_assert_mem_access(pc_to_xe(pc)); in xe_guc_pc_get_min_freq()
552 mutex_lock(&pc->freq_lock); in xe_guc_pc_get_min_freq()
553 if (!pc->freq_ready) { in xe_guc_pc_get_min_freq()
559 ret = pc_action_query_task_state(pc); in xe_guc_pc_get_min_freq()
563 *freq = pc_get_min_freq(pc); in xe_guc_pc_get_min_freq()
566 mutex_unlock(&pc->freq_lock); in xe_guc_pc_get_min_freq()
572 * @pc: The GuC PC
576 * -EAGAIN if GuC PC not ready (likely in middle of a reset),
579 int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) in xe_guc_pc_set_min_freq() argument
583 mutex_lock(&pc->freq_lock); in xe_guc_pc_set_min_freq()
584 if (!pc->freq_ready) { in xe_guc_pc_set_min_freq()
590 ret = pc_set_min_freq(pc, freq); in xe_guc_pc_set_min_freq()
594 pc->user_requested_min = freq; in xe_guc_pc_set_min_freq()
597 mutex_unlock(&pc->freq_lock); in xe_guc_pc_set_min_freq()
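The frequency getters and setters above share one guard: take pc->freq_lock, return -EAGAIN while freq_ready is still false (as the kernel-doc notes, typically in the middle of a GuC reset), and only then talk to SLPC. A rough userspace analogue of that guard, using a pthread mutex in place of the kernel mutex (build with -pthread; the "set" step is just a placeholder for the SLPC call):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t freq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool freq_ready;
static unsigned int min_freq;

static int set_min_freq(unsigned int freq)
{
        int ret = 0;

        pthread_mutex_lock(&freq_lock);
        if (!freq_ready) {
                ret = -EAGAIN;  /* mirror the driver's "PC not ready" answer */
                goto out;
        }
        min_freq = freq;        /* the driver issues the SLPC set-param here */
out:
        pthread_mutex_unlock(&freq_lock);
        return ret;
}

int main(void)
{
        printf("%d\n", set_min_freq(500));      /* negative: -EAGAIN, not ready */
        freq_ready = true;
        printf("%d\n", set_min_freq(500));      /* 0 */
        printf("min_freq=%u\n", min_freq);      /* 500 */
        return 0;
}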
603 * @pc: The GuC PC
607 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
609 int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq) in xe_guc_pc_get_max_freq() argument
613 mutex_lock(&pc->freq_lock); in xe_guc_pc_get_max_freq()
614 if (!pc->freq_ready) { in xe_guc_pc_get_max_freq()
620 ret = pc_action_query_task_state(pc); in xe_guc_pc_get_max_freq()
624 *freq = pc_get_max_freq(pc); in xe_guc_pc_get_max_freq()
627 mutex_unlock(&pc->freq_lock); in xe_guc_pc_get_max_freq()
633 * @pc: The GuC PC
637 * -EAGAIN if GuC PC not ready (likely in middle of a reset),
640 int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) in xe_guc_pc_set_max_freq() argument
644 mutex_lock(&pc->freq_lock); in xe_guc_pc_set_max_freq()
645 if (!pc->freq_ready) { in xe_guc_pc_set_max_freq()
651 ret = pc_set_max_freq(pc, freq); in xe_guc_pc_set_max_freq()
655 pc->user_requested_max = freq; in xe_guc_pc_set_max_freq()
658 mutex_unlock(&pc->freq_lock); in xe_guc_pc_set_max_freq()
664 * @pc: XE_GuC_PC instance
666 enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc) in xe_guc_pc_c_status() argument
668 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_c_status()
691 * @pc: Xe_GuC_PC instance
693 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc) in xe_guc_pc_rc6_residency() argument
695 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_rc6_residency()
705 * @pc: Xe_GuC_PC instance
707 u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc) in xe_guc_pc_mc6_residency() argument
709 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_mc6_residency()
717 static void mtl_init_fused_rp_values(struct xe_guc_pc *pc) in mtl_init_fused_rp_values() argument
719 struct xe_gt *gt = pc_to_gt(pc); in mtl_init_fused_rp_values()
722 xe_device_assert_mem_access(pc_to_xe(pc)); in mtl_init_fused_rp_values()
729 pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg)); in mtl_init_fused_rp_values()
731 pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg)); in mtl_init_fused_rp_values()
734 static void tgl_init_fused_rp_values(struct xe_guc_pc *pc) in tgl_init_fused_rp_values() argument
736 struct xe_gt *gt = pc_to_gt(pc); in tgl_init_fused_rp_values()
740 xe_device_assert_mem_access(pc_to_xe(pc)); in tgl_init_fused_rp_values()
746 pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER; in tgl_init_fused_rp_values()
747 pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER; in tgl_init_fused_rp_values()
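On the pre-MTL path above, tgl_init_fused_rp_values() pulls the RP0 and RPn bitfields out of the fused capability register and multiplies them by GT_FREQUENCY_MULTIPLIER (50 in the driver) to get MHz. The worked example below uses a made-up register value and a simplified field extractor rather than the real RP0_MASK/REG_FIELD_GET():

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50      /* fused RP fields are in 50 MHz units */

/* Simplified stand-in for REG_FIELD_GET(mask, reg). */
static unsigned int field_get(unsigned int mask, unsigned int shift,
                              unsigned int reg)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        unsigned int reg = 0x0006002a;  /* hypothetical capability register */
        unsigned int rp0 = field_get(0x000000ff, 0, reg);       /* 0x2a = 42 */
        unsigned int rpn = field_get(0x00ff0000, 16, reg);      /* 0x06 = 6 */

        printf("RP0 = %u MHz\n", rp0 * GT_FREQUENCY_MULTIPLIER);        /* 2100 */
        printf("RPn = %u MHz\n", rpn * GT_FREQUENCY_MULTIPLIER);        /* 300 */
        return 0;
}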
750 static void pc_init_fused_rp_values(struct xe_guc_pc *pc) in pc_init_fused_rp_values() argument
752 struct xe_gt *gt = pc_to_gt(pc); in pc_init_fused_rp_values()
756 mtl_init_fused_rp_values(pc); in pc_init_fused_rp_values()
758 tgl_init_fused_rp_values(pc); in pc_init_fused_rp_values()
761 static u32 pc_max_freq_cap(struct xe_guc_pc *pc) in pc_max_freq_cap() argument
763 struct xe_gt *gt = pc_to_gt(pc); in pc_max_freq_cap()
767 return min(LNL_MERT_FREQ_CAP, pc->rp0_freq); in pc_max_freq_cap()
769 return min(BMG_MERT_FREQ_CAP, pc->rp0_freq); in pc_max_freq_cap()
771 return pc->rp0_freq; in pc_max_freq_cap()
778 * @pc: Xe_GuC_PC instance
780 void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc) in xe_guc_pc_raise_unslice() argument
782 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_raise_unslice()
785 pc_set_cur_freq(pc, pc_max_freq_cap(pc)); in xe_guc_pc_raise_unslice()
790 * @pc: Xe_GuC_PC instance
792 void xe_guc_pc_init_early(struct xe_guc_pc *pc) in xe_guc_pc_init_early() argument
794 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_init_early()
797 pc_init_fused_rp_values(pc); in xe_guc_pc_init_early()
800 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc) in pc_adjust_freq_bounds() argument
804 lockdep_assert_held(&pc->freq_lock); in pc_adjust_freq_bounds()
806 ret = pc_action_query_task_state(pc); in pc_adjust_freq_bounds()
815 if (pc_get_max_freq(pc) > pc->rp0_freq) { in pc_adjust_freq_bounds()
816 ret = pc_set_max_freq(pc, pc->rp0_freq); in pc_adjust_freq_bounds()
825 if (pc_get_min_freq(pc) > pc->rp0_freq) in pc_adjust_freq_bounds()
826 ret = pc_set_min_freq(pc, pc->rp0_freq); in pc_adjust_freq_bounds()
832 static int pc_adjust_requested_freq(struct xe_guc_pc *pc) in pc_adjust_requested_freq() argument
836 lockdep_assert_held(&pc->freq_lock); in pc_adjust_requested_freq()
838 if (pc->user_requested_min != 0) { in pc_adjust_requested_freq()
839 ret = pc_set_min_freq(pc, pc->user_requested_min); in pc_adjust_requested_freq()
844 if (pc->user_requested_max != 0) { in pc_adjust_requested_freq()
845 ret = pc_set_max_freq(pc, pc->user_requested_max); in pc_adjust_requested_freq()
853 static int pc_set_mert_freq_cap(struct xe_guc_pc *pc) in pc_set_mert_freq_cap() argument
857 if (XE_WA(pc_to_gt(pc), 22019338487)) { in pc_set_mert_freq_cap()
861 ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq); in pc_set_mert_freq_cap()
863 ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq); in pc_set_mert_freq_cap()
870 mutex_lock(&pc->freq_lock); in pc_set_mert_freq_cap()
871 ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc))); in pc_set_mert_freq_cap()
873 ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc))); in pc_set_mert_freq_cap()
874 mutex_unlock(&pc->freq_lock); in pc_set_mert_freq_cap()
882 * @pc: The GuC PC
887 int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc) in xe_guc_pc_restore_stashed_freq() argument
891 if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc) in xe_guc_pc_restore_stashed_freq()
894 mutex_lock(&pc->freq_lock); in xe_guc_pc_restore_stashed_freq()
895 ret = pc_set_max_freq(pc, pc->stashed_max_freq); in xe_guc_pc_restore_stashed_freq()
897 ret = pc_set_min_freq(pc, pc->stashed_min_freq); in xe_guc_pc_restore_stashed_freq()
898 mutex_unlock(&pc->freq_lock); in xe_guc_pc_restore_stashed_freq()
905 * @pc: Xe_GuC_PC instance
911 int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) in xe_guc_pc_gucrc_disable() argument
913 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_gucrc_disable()
914 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_gucrc_disable()
921 ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL); in xe_guc_pc_gucrc_disable()
940 * @pc: Xe_GuC_PC instance
945 int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode) in xe_guc_pc_override_gucrc_mode() argument
949 xe_pm_runtime_get(pc_to_xe(pc)); in xe_guc_pc_override_gucrc_mode()
950 ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode); in xe_guc_pc_override_gucrc_mode()
951 xe_pm_runtime_put(pc_to_xe(pc)); in xe_guc_pc_override_gucrc_mode()
958 * @pc: Xe_GuC_PC instance
962 int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc) in xe_guc_pc_unset_gucrc_mode() argument
966 xe_pm_runtime_get(pc_to_xe(pc)); in xe_guc_pc_unset_gucrc_mode()
967 ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE); in xe_guc_pc_unset_gucrc_mode()
968 xe_pm_runtime_put(pc_to_xe(pc)); in xe_guc_pc_unset_gucrc_mode()
973 static void pc_init_pcode_freq(struct xe_guc_pc *pc) in pc_init_pcode_freq() argument
975 u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER); in pc_init_pcode_freq()
976 u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER); in pc_init_pcode_freq()
978 XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max)); in pc_init_pcode_freq()
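pc_init_pcode_freq() converts the MHz bounds back into the hardware's 50 MHz units before handing them to the PCODE min-frequency table, using DIV_ROUND_CLOSEST so the value rounds to the nearest unit instead of truncating. A tiny worked example with illustrative bounds and a positive-only DIV_ROUND_CLOSEST equivalent:

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50

/* Same result as the kernel's DIV_ROUND_CLOSEST() for positive operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned int rpn_freq = 300, rp0_freq = 2400;   /* illustrative MHz */

        printf("min=%u max=%u\n",
               DIV_ROUND_CLOSEST(rpn_freq, GT_FREQUENCY_MULTIPLIER),    /* 6 */
               DIV_ROUND_CLOSEST(rp0_freq, GT_FREQUENCY_MULTIPLIER));   /* 48 */
        return 0;
}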
981 static int pc_init_freqs(struct xe_guc_pc *pc) in pc_init_freqs() argument
985 mutex_lock(&pc->freq_lock); in pc_init_freqs()
987 ret = pc_adjust_freq_bounds(pc); in pc_init_freqs()
991 ret = pc_adjust_requested_freq(pc); in pc_init_freqs()
995 pc_update_rp_values(pc); in pc_init_freqs()
997 pc_init_pcode_freq(pc); in pc_init_freqs()
1003 pc->freq_ready = true; in pc_init_freqs()
1006 mutex_unlock(&pc->freq_lock); in pc_init_freqs()
1012 * @pc: Xe_GuC_PC instance
1014 int xe_guc_pc_start(struct xe_guc_pc *pc) in xe_guc_pc_start() argument
1016 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_start()
1017 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_start()
1036 pc_set_cur_freq(pc, UINT_MAX); in xe_guc_pc_start()
1042 memset(pc->bo->vmap.vaddr, 0, size); in xe_guc_pc_start()
1043 slpc_shared_data_write(pc, header.size, size); in xe_guc_pc_start()
1046 ret = pc_action_reset(pc); in xe_guc_pc_start()
1050 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, in xe_guc_pc_start()
1052 …xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_rea… in xe_guc_pc_start()
1053 xe_guc_pc_get_act_freq(pc), get_cur_freq(gt), in xe_guc_pc_start()
1056 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, in xe_guc_pc_start()
1058 …xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disab… in xe_guc_pc_start()
1063 xe_gt_warn(gt, "GuC PC excessive start time: %lldms", in xe_guc_pc_start()
1067 ret = pc_init_freqs(pc); in xe_guc_pc_start()
1071 ret = pc_set_mert_freq_cap(pc); in xe_guc_pc_start()
1076 xe_guc_pc_gucrc_disable(pc); in xe_guc_pc_start()
1081 ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL); in xe_guc_pc_start()
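xe_guc_pc_start() strings the pieces above together: drop to a known frequency, zero the shared-data BO and write its size, send the SLPC reset, wait for the RUNNING state (warning or failing on timeout), then initialize frequencies, apply the MERT cap where needed and finally hand Render-C control to the firmware. The stubbed sketch below shows only that ordering and the early-return error handling; the step names and bodies are placeholders, and the real function also handles forcewake, VFs and skip_guc_pc:

#include <stdio.h>

/* Placeholder steps standing in for the helpers named in the listing. */
static int action_reset(void)           { return 0; }
static int wait_until_running(void)     { return 0; }
static int init_freqs(void)             { return 0; }
static int set_mert_freq_cap(void)      { return 0; }
static int setup_gucrc_fw_control(void) { return 0; }

static int pc_start_sketch(void)
{
        int ret;

        ret = action_reset();
        if (ret)
                return ret;
        ret = wait_until_running();
        if (ret)
                return ret;
        ret = init_freqs();
        if (ret)
                return ret;
        ret = set_mert_freq_cap();
        if (ret)
                return ret;
        return setup_gucrc_fw_control();
}

int main(void)
{
        printf("%d\n", pc_start_sketch());      /* 0 on this stubbed happy path */
        return 0;
}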
1090 * @pc: Xe_GuC_PC instance
1092 int xe_guc_pc_stop(struct xe_guc_pc *pc) in xe_guc_pc_stop() argument
1094 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_stop()
1097 xe_gt_idle_disable_c6(pc_to_gt(pc)); in xe_guc_pc_stop()
1101 mutex_lock(&pc->freq_lock); in xe_guc_pc_stop()
1102 pc->freq_ready = false; in xe_guc_pc_stop()
1103 mutex_unlock(&pc->freq_lock); in xe_guc_pc_stop()
1114 struct xe_guc_pc *pc = arg; in xe_guc_pc_fini_hw() local
1115 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_fini_hw()
1121 fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); in xe_guc_pc_fini_hw()
1122 xe_guc_pc_gucrc_disable(pc); in xe_guc_pc_fini_hw()
1123 XE_WARN_ON(xe_guc_pc_stop(pc)); in xe_guc_pc_fini_hw()
1126 pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq)); in xe_guc_pc_fini_hw()
1128 xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref); in xe_guc_pc_fini_hw()
1133 * @pc: Xe_GuC_PC instance
1135 int xe_guc_pc_init(struct xe_guc_pc *pc) in xe_guc_pc_init() argument
1137 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_init()
1147 err = drmm_mutex_init(&xe->drm, &pc->freq_lock); in xe_guc_pc_init()
1158 pc->bo = bo; in xe_guc_pc_init()
1160 return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc); in xe_guc_pc_init()