/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/region.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <program_loading.h>

/* For now this is a good lowest common denominator for the total CPU cache.
   TODO: fetch the total amount of cache from CPUID leaf 2. */
#define MAX_CPU_CACHE (256 * KiB)

/* This makes the 'worst' case assumption that all cachelines covered by
   the MTRRs, no matter the caching type, are filled and not overlapping. */
static uint32_t max_cache_used(void)
{
	int i, total_mtrrs = get_var_mtrr_count();
	uint32_t total_cache = 0;

	for (i = 0; i < total_mtrrs; i++) {
		msr_t mtrr = rdmsr(MTRR_PHYS_MASK(i));
		if (!(mtrr.lo & MTRR_PHYS_MASK_VALID))
			continue;
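		/* The mask encodes the region size as its two's complement:
		   inverting the low mask bits and adding one recovers the
		   size in bytes (sufficient for regions below 4 GiB). */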
		total_cache += ~(mtrr.lo & 0xfffff000) + 1;
	}
	return total_cache;
}

void platform_prog_run(struct prog *prog)
{
	const uint32_t base = (uintptr_t)prog_start(prog);
	const uint32_t size = prog_size(prog);
	const uint32_t end = base + size;
	const uint32_t cache_used = max_cache_used();
	/* This will accumulate MTRRs as XIP stages are run.
	   For now this includes the bootblock, which sets up its own
	   caching elsewhere, verstage and romstage. */
	int mtrr_num = get_free_var_mtrr();
	uint32_t mtrr_base;
	uint32_t mtrr_size = 4 * KiB;
	struct cpuinfo_x86 cpu_info;

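	/* Extract family/model/stepping from CPUID leaf 1. */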
	get_fms(&cpu_info, cpuid_eax(1));
	/*
	 * An unidentified combination of speculative reads and branch
	 * predictions inside WRPROT-cacheable memory can cause invalidation
	 * of cachelines and loss of stack on models based on the NetBurst
	 * microarchitecture. Therefore disable the WRPROT region entirely
	 * for all family 0xf models.
	 */
	if (cpu_info.x86 == 0xf) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: CPU does not support caching ROM\n"
		       "The next stage will run slowly!\n");
		return;
	}

	if (mtrr_num == -1) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: No MTRR available to cache ROM!\n"
		       "The next stage will run slowly!\n");
		return;
	}

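	/* Bail out if even the minimal 4 KiB MTRR would push the
	   worst-case cache usage past the assumed total cache size. */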
	if (cache_used + mtrr_size > MAX_CPU_CACHE) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: No more cache available for the next stage\n"
		       "The next stage will run slowly!\n");
		return;
	}

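	/* Double the MTRR size until the naturally aligned region either
	   covers the whole stage or the next doubling would exceed the
	   cache budget. */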
	while (1) {
		if (ALIGN_DOWN(base, mtrr_size) + mtrr_size >= end)
			break;
		if (cache_used + mtrr_size * 2 > MAX_CPU_CACHE)
			break;
		mtrr_size *= 2;
	}

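	/* MTRR regions must be naturally aligned, so align the base down
	   to the chosen power-of-two size. */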
	mtrr_base = ALIGN_DOWN(base, mtrr_size);
	if (mtrr_base + mtrr_size < end) {
		printk(BIOS_NOTICE, "PROG_RUN: Limiting XIP cache to %uKiB!\n",
		       mtrr_size / KiB);
		/* Check if we can cover a bigger range by aligning up. */
		const uint32_t alt_base = ALIGN_UP(base, mtrr_size);
		const uint32_t lower_coverage = mtrr_base + mtrr_size - base;
		const uint32_t upper_coverage = MIN(alt_base + mtrr_size, end) - alt_base;
		if (upper_coverage > lower_coverage)
			mtrr_base = alt_base;
	}

	printk(BIOS_DEBUG,
	       "PROG_RUN: Setting MTRR to cache XIP stage. base: 0x%08x, size: 0x%08x\n",
	       mtrr_base, mtrr_size);

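	/* Use the write-protect type: reads from the XIP stage are cached
	   while writes bypass the cache, which matches execute-in-place
	   from read-only flash. */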
	set_var_mtrr(mtrr_num, mtrr_base, mtrr_size, MTRR_TYPE_WRPROT);
}