/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/msr.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/msr.h>
#include <types.h>

#include "common.h"

#define  CPUID_6_ECX_EPB		(1 << 3)
#define  CPUID_6_ENERGY_PERF_PREF	(1 << 10)
#define  CPUID_6_HWP			(1 << 7)

/* Structured Extended Feature Flags */
#define CPUID_EXT_FEATURE_TME_SUPPORTED (1 << 13)

void set_vmx_and_lock(void)
{
	set_feature_ctrl_vmx();
	set_feature_ctrl_lock();
}

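/*
 * IA32_FEATURE_CONTROL bit layout (per the Intel SDM): bit 0 locks the MSR,
 * bit 1 enables VMX inside SMX operation, bit 2 enables VMX outside SMX
 * operation, and bits 15:8 are the SENTER function enables set below when
 * Intel TXT is configured.
 */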
void set_feature_ctrl_vmx_arg(bool enable)
{
	msr_t msr;
	uint32_t feature_flag;

	feature_flag = cpu_get_feature_flags_ecx();
	/* Check that VMX is supported before reading or writing the MSR. */
	if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) {
		printk(BIOS_DEBUG, "CPU doesn't support VMX; exiting\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked; ");
		printk(BIOS_DEBUG, "VMX status: %s\n", msr.lo & (1 << 2) ?
			"enabled" : "disabled");
		/* IA32_FEATURE_CONTROL is locked. Writing it again would
		 * raise a #GP fault.
		 */
		return;
	}

	/* The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of VMX config setting.
	 */
	msr.hi = msr.lo = 0;
	if (enable) {
		msr.lo |= (1 << 2);
		if (feature_flag & CPUID_SMX) {
			msr.lo |= (1 << 1);
			if (CONFIG(INTEL_TXT)) {
				/* Enable GetSec and all GetSec leaves */
				msr.lo |= (0xff << 8);
			}
		}
	}

	wrmsr(IA32_FEATURE_CONTROL, msr);

	printk(BIOS_DEBUG, "VMX status: %s\n",
		enable ? "enabled" : "disabled");
}

void set_feature_ctrl_vmx(void)
{
	set_feature_ctrl_vmx_arg(CONFIG(ENABLE_VMX));
}

void set_feature_ctrl_lock(void)
{
	msr_t msr;
	int lock = CONFIG(SET_IA32_FC_LOCK_BIT);
	uint32_t feature_flag = cpu_get_feature_flags_ecx();

	/* Check if VMX is supported before reading or writing the MSR */
	if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL not supported; skipping lock\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked\n");
		/* IA32_FEATURE_CONTROL is locked. Writing it again would
		 * raise a #GP fault.
		 */
		return;
	}

	if (lock) {
		/* Set lock bit */
		msr.lo |= (1 << 0);
		wrmsr(IA32_FEATURE_CONTROL, msr);
	}

	printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL status: %s\n",
		lock ? "locked" : "unlocked");
}

/*
 * Init cppc_config in a way that's appropriate for Intel
 * processors with Enhanced Intel SpeedStep Technology.
 * NOTE: version 2 is expected to be the typical use case.
 * For now this function 'punts' on version 3 and just
 * populates the additional fields with 'unsupported'.
 */
void cpu_init_cppc_config(struct cppc_config *config, u32 version)
{
	config->version = version;

	config->entries[CPPC_HIGHEST_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 0, 8);
	config->entries[CPPC_NOMINAL_PERF]		= CPPC_REG_MSR(MSR_PLATFORM_INFO, 8, 8);
	config->entries[CPPC_LOWEST_NONL_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 16, 8);
	config->entries[CPPC_LOWEST_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 24, 8);
	config->entries[CPPC_GUARANTEED_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 8, 8);
	config->entries[CPPC_DESIRED_PERF]		= CPPC_REG_MSR(IA32_HWP_REQUEST, 16, 8);
	config->entries[CPPC_MIN_PERF]			= CPPC_REG_MSR(IA32_HWP_REQUEST, 0, 8);
	config->entries[CPPC_MAX_PERF]			= CPPC_REG_MSR(IA32_HWP_REQUEST, 8, 8);
	config->entries[CPPC_PERF_REDUCE_TOLERANCE]	= CPPC_UNSUPPORTED;
	config->entries[CPPC_TIME_WINDOW]		= CPPC_UNSUPPORTED;
	config->entries[CPPC_COUNTER_WRAP]		= CPPC_UNSUPPORTED;
	config->entries[CPPC_REF_PERF_COUNTER]		= CPPC_REG_MSR(IA32_MPERF, 0, 64);
	config->entries[CPPC_DELIVERED_PERF_COUNTER]	= CPPC_REG_MSR(IA32_APERF, 0, 64);
	config->entries[CPPC_PERF_LIMITED]		= CPPC_REG_MSR(IA32_HWP_STATUS, 2, 1);
	config->entries[CPPC_ENABLE]			= CPPC_REG_MSR(IA32_PM_ENABLE, 0, 1);

	if (version < 2)
		return;

	config->entries[CPPC_AUTO_SELECT]		= CPPC_DWORD(1);
	config->entries[CPPC_AUTO_ACTIVITY_WINDOW]	= CPPC_REG_MSR(IA32_HWP_REQUEST, 32, 10);
	config->entries[CPPC_PERF_PREF]			= CPPC_REG_MSR(IA32_HWP_REQUEST, 24, 8);
	config->entries[CPPC_REF_PERF]			= CPPC_UNSUPPORTED;

	if (version < 3)
		return;

	config->entries[CPPC_LOWEST_FREQ]		= CPPC_UNSUPPORTED;
	config->entries[CPPC_NOMINAL_FREQ]		= CPPC_UNSUPPORTED;
}
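
/*
 * Illustrative use from a SoC's ACPI code (not part of this file), assuming
 * version 2 and coreboot's acpigen_write_CPPC_package() helper:
 *
 *	struct cppc_config config;
 *	cpu_init_cppc_config(&config, 2);
 *	acpigen_write_CPPC_package(&config);
 */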

void set_aesni_lock(void)
{
	msr_t msr;

	if (!CONFIG(SET_MSR_AESNI_LOCK_BIT))
		return;

	if (!(cpu_get_feature_flags_ecx() & CPUID_AES))
		return;

	/* Only run once per core as specified in the MSR datasheet */
	if (intel_ht_sibling())
		return;

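	/*
	 * MSR_FEATURE_CONFIG (0x13c): bit 0 locks the MSR and bit 1, when set
	 * before locking, disables AES-NI. Setting only AESNI_LOCK therefore
	 * keeps AES-NI enabled while preventing later changes.
	 */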
	msr = rdmsr(MSR_FEATURE_CONFIG);
	if (msr.lo & AESNI_LOCK)
		return;

	msr_set(MSR_FEATURE_CONFIG, AESNI_LOCK);
}

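/*
 * Allow the local APIC to send Task Priority Register update messages by
 * clearing TPR_UPDATES_DISABLE in MSR_PIC_MSG_CONTROL.
 */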
void enable_lapic_tpr(void)
{
	msr_unset(MSR_PIC_MSG_CONTROL, TPR_UPDATES_DISABLE);
}

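/*
 * Enable Direct Cache Access if the CPU advertises it (CPUID.01H:ECX bit 18),
 * which lets I/O devices place data directly into the CPU cache.
 */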
void configure_dca_cap(void)
{
	if (cpu_get_feature_flags_ecx() & CPUID_DCA)
		msr_set(IA32_PLATFORM_DCA_CAP, DCA_TYPE0_EN);
}

void set_energy_perf_bias(u8 policy)
{
	u8 epb = policy & ENERGY_POLICY_MASK;

	if (!(cpuid_ecx(6) & CPUID_6_ECX_EPB))
		return;

	msr_unset_and_set(IA32_ENERGY_PERF_BIAS, ENERGY_POLICY_MASK, epb);
	printk(BIOS_DEBUG, "cpu: energy policy set to %u\n", epb);
}

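/*
 * Example (illustrative): callers would normally pass one of the
 * ENERGY_POLICY_* constants from common.h, e.g.
 *
 *	set_energy_perf_bias(ENERGY_POLICY_NORMAL);
 */
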
/*
 * Check energy performance preference and HWP capabilities from Thermal and
 * Power Management Leaf CPUID
 */
bool check_energy_perf_cap(void)
{
	const u32 cap = cpuid_eax(CPUID_LEAF_PM);
	if (!(cap & CPUID_6_ENERGY_PERF_PREF))
		return false;
	if (!(cap & CPUID_6_HWP))
		return false;
	return true;
}

/*
 * Instructs the CPU to use EPP hints. This means that any energy policies set
 * up in `set_energy_perf_bias` will be ignored afterwards.
 */
void enable_energy_perf_pref(void)
{
	msr_t msr = rdmsr(IA32_PM_ENABLE);
	if (!(msr.lo & HWP_ENABLE)) {
		/* Package-scoped MSR */
		printk(BIOS_DEBUG, "Enabling HWP: energy-perf preference replaces energy-perf bias\n");
		msr_set(IA32_PM_ENABLE, HWP_ENABLE);
	}
}

/*
 * Set the IA32_HWP_REQUEST Energy-Performance Preference bits on the logical
 * thread. 0 is a hint to the HWP to prefer performance, and 255 is a hint to
 * prefer energy efficiency.
 * This function needs to be called when HWP_ENABLE is set.
 */
void set_energy_perf_pref(u8 pref)
{
	msr_unset_and_set(IA32_HWP_REQUEST, IA32_HWP_REQUEST_EPP_MASK,
		(uint64_t)pref << IA32_HWP_REQUEST_EPP_SHIFT);
}

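/*
 * Illustrative call sequence for the helpers above (0x80 is an assumed
 * "balanced" hint, midway between performance and energy efficiency):
 *
 *	if (check_energy_perf_cap()) {
 *		enable_energy_perf_pref();
 *		set_energy_perf_pref(0x80);
 *	}
 */
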
bool is_tme_supported(void)
{
	struct cpuid_result cpuid_regs;

	cpuid_regs = cpuid_ext(CPUID_STRUCT_EXTENDED_FEATURE_FLAGS, 0x0);
	return (cpuid_regs.ecx & CPUID_EXT_FEATURE_TME_SUPPORTED);
}

/*
 * Get the number of address bits used by Total Memory Encryption (TME)
 *
 * Returns TME_ACTIVATE[MK_TME_KEYID_BITS] (MSR 0x982, bits [35:32]).
 *
 * NOTE: This function should be called after the MK-TME feature has been
 * configured in the MSRs according to the capabilities and platform
 * configuration, for instance after FSP-M.
 */
static unsigned int get_tme_keyid_bits(void)
{
	msr_t msr;

	msr = rdmsr(MSR_TME_ACTIVATE);
	return msr.hi & TME_ACTIVATE_HI_KEYID_BITS_MASK;
}

unsigned int get_reserved_phys_addr_bits(void)
{
	if (!is_tme_supported())
		return 0;

	return get_tme_keyid_bits();
}
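
/*
 * Illustrative (not part of this file): MK-TME repurposes the top KeyID bits
 * of the physical address, so a caller deriving the usable address width
 * could subtract them from the CPUID 0x80000008 physical-address bit count:
 *
 *	unsigned int phys_bits = cpuid_eax(0x80000008) & 0xff;
 *	unsigned int usable_bits = phys_bits - get_reserved_phys_addr_bits();
 */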
266