/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/x86/mtrr.h>
#include <cpu/x86/mp.h>
#include <amdblocks/cpu.h>
#include <amdblocks/smm.h>
#include <console/console.h>
#include <cpu/amd/amd64_save_state.h>
#include <cpu/amd/microcode.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/smm.h>
#include <types.h>

/* AP MTRRs will be synced to the BSP in the SIPI vector, so set them up before MP init. */
static void pre_mp_init(void)
{
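	/*
	 * If SYSCFG forces write-back caching for the memory between 4 GiB and TOM2,
	 * variable MTRRs don't need to cover the memory above 4 GiB.
	 */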
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
	if (CONFIG(SOC_AMD_COMMON_BLOCK_UCODE))
		amd_load_microcode_from_cbfs();
}

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);

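	/*
	 * The TSEG range is programmed as a base/mask pair, which can only describe a
	 * power-of-two sized region that is naturally aligned to its size.
	 */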
	if (!IS_ALIGNED(tseg_base, tseg_size)) {
		printk(BIOS_ERR, "TSEG base not aligned to TSEG size\n");
		return;
	}
	/* Minimum granularity for TSEG MSRs */
	if (tseg_size < 128 * KiB) {
		printk(BIOS_ERR, "TSEG size (0x%zx) too small\n", tseg_size);
		return;
	}

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
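	/* The MP init code uses this size to lay out each core's save state in SMRAM. */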
	*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
}

static void smm_relocation_handler(void)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	/* For the TSEG mask, all physical address bits, including the ones reserved for
	   memory encryption, need to be taken into account. TODO: Find out why this is
	   the case. */
	const unsigned int total_physical_address_bits =
		cpu_phys_address_size() + get_reserved_phys_addr_bits();

	smm_region(&tseg_base, &tseg_size);

	msr_t msr;
	msr.raw = tseg_base;
	wrmsr(SMM_ADDR_MSR, msr);

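	/*
	 * The mask covers all implemented physical address bits down to the TSEG size.
	 * Example: with a 4 MiB TSEG and 48 physical address bits, the mask is
	 * 0x0000ffff_ffc00000, so address bits 47..22 take part in the range match.
	 */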
	msr.lo = ~(tseg_size - 1);
	msr.lo |= SMM_TSEG_WB;
	msr.hi = (1 << (total_physical_address_bits - 32)) - 1;
	wrmsr(SMM_MASK_MSR, msr);

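	/* Relocate this core's SMBASE from the architectural default of 0x30000 to its
	   own location inside TSEG. */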
	uintptr_t smbase = smm_get_cpu_smbase(cpu_index());
	msr_t smm_base = {
		.raw = smbase
	};
	wrmsr(SMM_BASE_MSR, smm_base);

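	/* Unless the SoC defers this to a late SMM locking hook, mark TSEG as valid and
	   lock the SMM configuration against further changes. */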
	if (!CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING)) {
		tseg_valid();
		lock_smm();
	}
}

static void post_mp_init(void)
{
	if (CONFIG(SOC_AMD_COMMON_BLOCK_UCODE))
		amd_free_microcode();
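	/* Only enable SMI generation once all cores have relocated their SMM environment. */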
	global_smi_enable();
}

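/* MP init callbacks for SoCs that set up and use SMM */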
const struct mp_ops amd_mp_ops_with_smm = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.per_cpu_smm_trigger = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

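/* MP init callbacks without any SMM setup */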
const struct mp_ops amd_mp_ops_no_smm = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
};