/* SPDX-License-Identifier: GPL-2.0-only */

#include <bootstate.h>
#include <boot/coreboot_tables.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <post.h>
#include <string.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/tsc.h>
#include <device/device.h>
#include <smp/spinlock.h>

#if ENV_X86_32
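/* Any CPU that can reach 64-bit mode necessarily implements CPUID, so these
 * pre-CPUID probes are only built for 32-bit coreboot. */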
/* Standard test to see if a specific EFLAGS bit is changeable */
static inline int flag_is_changeable_p(uint32_t flag)
{
	uint32_t f1, f2;

	asm(
		"pushfl\n\t"		/* save the original EFLAGS */
		"pushfl\n\t"
		"popl %0\n\t"		/* f1 = EFLAGS */
		"movl %0,%1\n\t"	/* f2 = f1 */
		"xorl %2,%0\n\t"	/* toggle the bit(s) under test */
		"pushl %0\n\t"
		"popfl\n\t"		/* try to write the toggled value... */
		"pushfl\n\t"
		"popl %0\n\t"		/* ...and read back what actually stuck */
		"popfl\n\t"		/* restore the original EFLAGS */
		: "=&r" (f1), "=&r" (f2)
		: "ir" (flag));
	return ((f1^f2) & flag) != 0;
}
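
/* identify_cpu() below calls this with X86_EFLAGS_AC (the alignment-check
 * flag, bit 18): the AC bit first appeared on the 486, so a 386 cannot
 * toggle it. */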

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char)(test >> 8) == 0x02;
}
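
/* Why 0x02: with %eax = 5, %ah is 0, so the leading SAHF clears the
 * arithmetic flags. Bit 1 of FLAGS always reads back as 1, so a CPU that
 * carries the flags across the DIV unchanged yields %ah == 0x02 from the
 * final LAHF, while other 486-class parts clobber the flags and produce
 * some other value. */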

/*
 *	Detect a NexGen CPU running without BIOS hypercode new enough
 *	to have CPUID. (Thanks to Herbert Oppmann)
 */

static int deep_magic_nexgen_probe(void)
{
	int ret;

	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx");
	return ret;
}
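
/* 0x5555 / 2 leaves quotient 0x2aaa and remainder 1. What DIV does to ZF is
 * architecturally undefined, and that is what the probe keys off: MOV does
 * not touch the flags, so the function returns nonzero exactly when the CPU
 * leaves ZF set after the division, as the NexGen parts in question do. */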
#endif

/* List of CPU vendor strings along with their normalized
 * id values.
 */
static struct {
	int vendor;
	const char *name;
} x86_vendors[] = {
	{ X86_VENDOR_INTEL,     "GenuineIntel", },
	{ X86_VENDOR_CYRIX,     "CyrixInstead", },
	{ X86_VENDOR_AMD,       "AuthenticAMD", },
	{ X86_VENDOR_UMC,       "UMC UMC UMC ", },
	{ X86_VENDOR_NEXGEN,    "NexGenDriven", },
	{ X86_VENDOR_CENTAUR,   "CentaurHauls", },
	{ X86_VENDOR_RISE,      "RiseRiseRise", },
	{ X86_VENDOR_TRANSMETA, "GenuineTMx86", },
	{ X86_VENDOR_TRANSMETA, "TransmetaCPU", },
	{ X86_VENDOR_NSC,       "Geode by NSC", },
	{ X86_VENDOR_SIS,       "SiS SiS SiS ", },
	{ X86_VENDOR_HYGON,     "HygonGenuine", },
};

static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL]     = "Intel",
	[X86_VENDOR_CYRIX]     = "Cyrix",
	[X86_VENDOR_AMD]       = "AMD",
	[X86_VENDOR_UMC]       = "UMC",
	[X86_VENDOR_NEXGEN]    = "NexGen",
	[X86_VENDOR_CENTAUR]   = "Centaur",
	[X86_VENDOR_RISE]      = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC]       = "NSC",
	[X86_VENDOR_SIS]       = "SiS",
	[X86_VENDOR_HYGON]     = "Hygon",
};

static const char *cpu_vendor_name(int vendor)
{
	const char *name;
	name = "<invalid CPU vendor>";
	if (vendor < ARRAY_SIZE(x86_vendor_name) &&
		x86_vendor_name[vendor] != 0)
		name = x86_vendor_name[vendor];
	return name;
}

static void identify_cpu(struct device *cpu)
{
	char vendor_name[16];
	int i;

	vendor_name[0] = '\0'; /* Unset */

#if ENV_X86_32
	/* Find the id and vendor_name */
	if (!cpu_have_cpuid()) {
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			cpu->device = 0x00000400; /* 486 */
		else
			cpu->device = 0x00000300; /* 386 */
		if (cpu->device == 0x00000400 && test_cyrix_52div())
			memcpy(vendor_name, "CyrixInstead", 13);
			/* If we ever care we can enable cpuid here */
		/* Detect NexGen with old hypercode */
		else if (deep_magic_nexgen_probe())
			memcpy(vendor_name, "NexGenDriven", 13);
	}
#endif
	if (cpu_have_cpuid()) {
		int  cpuid_level;
		struct cpuid_result result;
		result = cpuid(0x00000000);
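		/* Leaf 0 reports the highest supported leaf in EAX and the
		 * 12-byte vendor string in EBX, EDX, ECX (in that order),
		 * e.g. "GenuineIntel". */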
		cpuid_level     = result.eax;
		vendor_name[0]  = (result.ebx >>  0) & 0xff;
		vendor_name[1]  = (result.ebx >>  8) & 0xff;
		vendor_name[2]  = (result.ebx >> 16) & 0xff;
		vendor_name[3]  = (result.ebx >> 24) & 0xff;
		vendor_name[4]  = (result.edx >>  0) & 0xff;
		vendor_name[5]  = (result.edx >>  8) & 0xff;
		vendor_name[6]  = (result.edx >> 16) & 0xff;
		vendor_name[7]  = (result.edx >> 24) & 0xff;
		vendor_name[8]  = (result.ecx >>  0) & 0xff;
		vendor_name[9]  = (result.ecx >>  8) & 0xff;
		vendor_name[10] = (result.ecx >> 16) & 0xff;
		vendor_name[11] = (result.ecx >> 24) & 0xff;
		vendor_name[12] = '\0';

		/* Intel-defined flags: level 0x00000001 */
		if (cpuid_level >= 0x00000001)
			cpu->device = cpu_get_cpuid();
		else
			/* A CPU reporting only CPUID level 0 is practically
			 * unheard of; assume a 486. */
			cpu->device = 0x00000400;
	}
	cpu->vendor = X86_VENDOR_UNKNOWN;
	for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
		if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
			cpu->vendor = x86_vendors[i].vendor;
			break;
		}
	}
}

struct cpu_driver *find_cpu_driver(struct device *cpu)
{
	struct cpu_driver *driver;
	for (driver = _cpu_drivers; driver < _ecpu_drivers; driver++) {
		const struct cpu_device_id *id;
		for (id = driver->id_table;
		     id->vendor != X86_VENDOR_INVALID; id++) {
			if (cpu->vendor == id->vendor &&
			    cpuid_match(cpu->device, id->device, id->device_match_mask))
				return driver;
			if (id->vendor == X86_VENDOR_ANY)
				return driver;
		}
	}
	return NULL;
}
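
/* Drivers land in the _cpu_drivers ... _ecpu_drivers array via the
 * __cpu_driver linker-set attribute. A sketch of a registration (the ID
 * values and the "cpu_dev_ops" name here are hypothetical; see an existing
 * CPU driver for the exact helpers your tree provides):
 *
 *	static const struct cpu_device_id cpu_table[] = {
 *		{ X86_VENDOR_INTEL, 0x306a0, 0xffff0 },
 *		{ X86_VENDOR_INVALID },
 *	};
 *
 *	static const struct cpu_driver driver __cpu_driver = {
 *		.ops      = &cpu_dev_ops,
 *		.id_table = cpu_table,
 *	};
 */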

static void set_cpu_ops(struct device *cpu)
{
	struct cpu_driver *driver = find_cpu_driver(cpu);
	cpu->ops = driver ? driver->ops : NULL;
}

void cpu_initialize(void)
{
	/* Because we busy-wait at the printk spinlock, it is important to
	 * keep the number of messages printed by secondary CPUs to a
	 * minimum when debugging is disabled.
	 */
	struct device *cpu;
	struct cpu_info *info;
	struct cpuinfo_x86 c;

	info = cpu_info();

	printk(BIOS_INFO, "Initializing CPU #%zd\n", info->index);

	cpu = info->cpu;
	if (!cpu)
		die("CPU: missing CPU device structure");

	if (cpu->initialized)
		return;

	post_log_path(cpu);

	/* Find what type of CPU we are dealing with */
	identify_cpu(cpu);
	printk(BIOS_DEBUG, "CPU: vendor %s device %x\n",
		cpu_vendor_name(cpu->vendor), cpu->device);
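
	/* get_fms() splits the raw CPUID signature into family/model/
	 * stepping, folding in the extended fields: 0x000306a9, for
	 * instance, decodes as family 0x06, model 0x3a, stepping 0x09. */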
	get_fms(&c, cpu->device);

	printk(BIOS_DEBUG, "CPU: family %02x, model %02x, stepping %02x\n",
		c.x86, c.x86_model, c.x86_mask);

	/* Lookup the cpu's operations */
	set_cpu_ops(cpu);

	if (!cpu->ops) {
		/* Mask out the stepping and try again, so that a driver
		 * matching e.g. 0x306a0 also covers device 0x306a9 */
		cpu->device -= c.x86_mask;
		set_cpu_ops(cpu);
		cpu->device += c.x86_mask;
		if (!cpu->ops)
			die("Unknown cpu");
		printk(BIOS_DEBUG, "Using generic CPU ops (good)\n");
	}

	/* Initialize the CPU */
	if (cpu->ops && cpu->ops->init) {
		cpu->enabled = 1;
		cpu->initialized = 1;
		cpu->ops->init(cpu);
	}
	post_log_clear();

	printk(BIOS_INFO, "CPU #%zd initialized\n", info->index);
}

void lb_arch_add_records(struct lb_header *header)
{
	uint32_t freq_khz;
	struct lb_tsc_info *tsc_info;

	/* Don't advertise a TSC rate unless it's constant. */
	if (!tsc_constant_rate())
		return;

	freq_khz = tsc_freq_mhz() * 1000;

	/* No use exposing a TSC frequency that is zero. */
	if (freq_khz == 0)
		return;

	tsc_info = (void *)lb_new_record(header);
	tsc_info->tag = LB_TAG_TSC_INFO;
	tsc_info->size = sizeof(*tsc_info);
	tsc_info->freq_khz = freq_khz;
}
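
/* A payload can then read the LB_TAG_TSC_INFO record out of the coreboot
 * tables and use the advertised rate directly instead of re-calibrating
 * the TSC itself. */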

void arch_bootstate_coreboot_exit(void)
{
	/* APs are already parked by existing infrastructure. */
	if (!CONFIG(PARALLEL_MP_AP_WORK))
		return;

	/* APs are waiting for work. Last thing to do is park them. */
	mp_park_aps();
}

/* cpu_info() looks at address 0 at the base of %gs for a pointer to struct cpu_info */
static struct per_cpu_segment_data segment_data[CONFIG_MAX_CPUS];
struct cpu_info cpu_infos[CONFIG_MAX_CPUS] = {0};
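
/* Since struct per_cpu_segment_data keeps the cpu_info pointer at offset 0,
 * the reader side reduces to a single %gs-relative load. A sketch of what
 * cpu_info() amounts to (the real implementation lives elsewhere and may be
 * written differently):
 *
 *	struct cpu_info *cpu_info(void)
 *	{
 *		struct cpu_info *ci;
 *		__asm__("mov %%gs:0, %0" : "=r" (ci));
 *		return ci;
 *	}
 */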

enum cb_err set_cpu_info(unsigned int index, struct device *cpu)
{
	if (index >= ARRAY_SIZE(cpu_infos))
		return CB_ERR;

	if (!cpu)
		return CB_ERR;

	const struct cpu_info info = { .cpu = cpu, .index = index};
	cpu_infos[index] = info;
	segment_data[index].cpu_info = &cpu_infos[index];

	struct segment_descriptor {
		uint16_t segment_limit_0_15;
		uint16_t base_address_0_15;
		uint8_t base_address_16_23;
		uint8_t attrs[2];
		uint8_t base_address_24_31;
	} *segment_descriptor = (void *)&per_cpu_segment_descriptors;

	/* Scatter the 32-bit base of this CPU's segment_data across the
	 * descriptor's split base fields. */
	segment_descriptor[index].base_address_0_15 = (uintptr_t)&segment_data[index] & 0xffff;
	segment_descriptor[index].base_address_16_23 = ((uintptr_t)&segment_data[index] >> 16) & 0xff;
	segment_descriptor[index].base_address_24_31 = ((uintptr_t)&segment_data[index] >> 24) & 0xff;

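	/* Each GDT descriptor is 8 bytes and the low 3 bits of a selector
	 * are the RPL/TI fields, so CPU "index" uses the selector
	 * index * 8 past the first per-CPU descriptor. */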
	const unsigned int cpu_segment = per_cpu_segment_selector + (index << 3);

	/* Load the selector into %gs so that %gs-relative accesses reach
	 * this CPU's segment_data from here on. */
	__asm__ __volatile__ ("mov %0, %%gs\n"
		:
		: "r" (cpu_segment)
		: );

	return CB_SUCCESS;
}
339