/* xref: /aosp_15_r20/external/coreboot/src/cpu/x86/smm/smm_module_loader.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c) */
/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi_gnvs.h>
#include <cbmem.h>
#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/mmio.h>
#include <rmodule.h>
#include <smmstore.h>
#include <stdio.h>
#include <string.h>
#include <types.h>

#define SMM_CODE_SEGMENT_SIZE 0x10000

/*
 * Components that make up the SMRAM:
 * 1. Save state - the total save state memory used
 * 2. Stack - stacks for the CPUs in the SMM handler
 * 3. Stub - SMM stub code for calling into handler
 * 4. Handler - C-based SMM handler.
 *
 * The components are assumed to consist of one consecutive region.
 */

/*
 * The stub is the entry point that sets up protected mode and stacks for each
 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
 */
extern unsigned char _binary_smmstub_start[];

/* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32

struct cpu_smm_info {
	uint8_t active;
	uintptr_t smbase;
	struct region ss;
	struct region stub_code;
};
struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };

/*
 * This method creates a map of all the CPU entry points, save state locations
 * and the beginning and end of code segments for each CPU. This map is used
 * during relocation to properly align as many CPUs as can fit into the SMRAM
 * region. For more information on how SMRAM works, refer to the latest Intel
 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
 * following regions:
 * +-----------------+ Top of SMRAM
 * |      MSEG       |
 * +-----------------+
 * |    common       |
 * |  smi handler    | 64K
 * |                 |
 * +-----------------+
 * | CPU 0 code seg  |
 * +-----------------+
 * | CPU 1 code seg  |
 * +-----------------+
 * | CPU x code seg  |
 * +-----------------+
 * |                 |
 * |                 |
 * +-----------------+
 * |    stacks       |
 * +-----------------+ <- START of SMRAM
 *
 * The code below checks when a code segment is full and begins placing the
 * remaining CPUs in the lower segments. The entry point for each CPU is
 * smbase + 0x8000 and save state is smbase + 0x8000 + (0x8000 - state save
 * size). The save state area grows downward into the CPU's entry point.
 * Therefore staggering too many CPUs in one 32K block will corrupt CPU0's
 * entry code as the save states move downward.
 * input : smbase of first CPU (all other CPUs
 *         will go below this address)
 * input : num_cpus in the system. The map will
 *         be created from 0 to num_cpus.
 */
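
/*
 * Worked example (illustrative numbers only, not from the source): assume a
 * hypothetical stub_size of 0x800 and a cpu_save_state_size of 0x400. Then
 * needed_ss_size = MAX(0x400, 0x800) = 0x800 and
 * cpus_per_segment = (0x10000 - 0x8000 - 0x800) / 0x800 = 15,
 * so CPUs 0-14 stagger downward within the first 64K code segment and CPU 15
 * starts a new segment one SMM_CODE_SEGMENT_SIZE lower. Real sizes depend on
 * the platform's save state size and the compiled stub.
 */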
static int smm_create_map(const uintptr_t smbase, const unsigned int num_cpus,
			  const struct smm_loader_params *params)
{
	struct rmodule smm_stub;

	if (ARRAY_SIZE(cpus) < num_cpus) {
		printk(BIOS_ERR, "%s: increase MAX_CPUS in Kconfig\n", __func__);
		return 0;
	}

	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
		return 0;
	}

	/*
	 * How many CPUs can fit into one 64K segment?
	 * Make sure that the first stub does not overlap with the last save state of a segment.
	 */
	const size_t stub_size = rmodule_memory_size(&smm_stub);
	const size_t needed_ss_size = MAX(params->cpu_save_state_size, stub_size);
	const size_t cpus_per_segment =
		(SMM_CODE_SEGMENT_SIZE - SMM_ENTRY_OFFSET - stub_size) / needed_ss_size;

	if (cpus_per_segment == 0) {
		printk(BIOS_ERR, "%s: CPUs won't fit in segment. Broken stub or save state size\n",
		       __func__);
		return 0;
	}

	for (unsigned int i = 0; i < num_cpus; i++) {
		const size_t segment_number = i / cpus_per_segment;
		cpus[i].smbase = smbase - SMM_CODE_SEGMENT_SIZE * segment_number
			- needed_ss_size * (i % cpus_per_segment);
		cpus[i].stub_code.offset = cpus[i].smbase + SMM_ENTRY_OFFSET;
		cpus[i].stub_code.size = stub_size;
		cpus[i].ss.offset = cpus[i].smbase + SMM_CODE_SEGMENT_SIZE
			- params->cpu_save_state_size;
		cpus[i].ss.size = params->cpu_save_state_size;
		cpus[i].active = 1;
	}

	return 1;
}

/*
 * This method expects the smm relocation map to be complete.
 * This method does not read any HW registers, it simply uses a
 * map that was created during SMM setup.
 * input: cpu_num - cpu number which is used as an index into the
 *       map to return the smbase
 */
u32 smm_get_cpu_smbase(unsigned int cpu_num)
{
	if (cpu_num < CONFIG_MAX_CPUS) {
		if (cpus[cpu_num].active)
			return cpus[cpu_num].smbase;
	}
	return 0;
}

/*
 * This method assumes that at least 1 CPU has been set up from
 * which it will place other CPUs below its smbase ensuring that
 * save state does not clobber the first CPU's init code segment. The init
 * code, which is the smm stub code, is the same for all CPUs. They enter
 * smm, set up stacks (based on their apic id), enter protected mode
 * and then jump to the common smi handler.  The stack is allocated
 * at the beginning of smram (aka tseg base, not smbase). The stack
 * pointer for each CPU is calculated by using its apic id
 * (code is in smm_stub.S)
 * Each entry point will now have the same stub code which sets up the CPU
 * stack, enters protected mode and then jumps to the smi handler. It is
 * important to enter protected mode before the jump because the "jump to
 * address" might be larger than the 20-bit address supported by real mode.
 * SMI entry right now is in real mode.
 * input: num_cpus - number of cpus that need relocation including
 *        the first CPU (though its code is already loaded)
 */

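/*
 * Sketch of the per-CPU stack assignment done in the stub (smm_stub.S; this
 * is a simplified restatement of that assembly, not a copy of it): each CPU
 * resolves its index n by finding its APIC ID in the apic_id_to_cpu table
 * passed via the stub parameters, then takes
 *     sp = stack_top - n * stack_size
 * so the per-CPU stacks tile downward from stack_top without overlapping.
 */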
static void smm_place_entry_code(const unsigned int num_cpus)
{
	unsigned int i;
	size_t size;

	/* start at 1, the first CPU stub code is already there */
	size = region_sz(&cpus[0].stub_code);
	for (i = 1; i < num_cpus; i++) {
		printk(BIOS_DEBUG,
		       "SMM Module: placing smm entry code at %zx, cpu # 0x%x\n",
		       region_offset(&cpus[i].stub_code), i);
		memcpy((void *)region_offset(&cpus[i].stub_code),
		       (void *)region_offset(&cpus[0].stub_code), size);
		printk(BIOS_SPEW, "%s: copying from %zx to %zx 0x%zx bytes\n",
		       __func__, region_offset(&cpus[0].stub_code),
		       region_offset(&cpus[i].stub_code), size);
	}
}

static uintptr_t stack_top;
static size_t g_stack_size;

int smm_setup_stack(const uintptr_t perm_smbase, const size_t perm_smram_size,
		     const unsigned int total_cpus, const size_t stack_size)
{
	/* Need a minimum stack size and alignment. */
	if (stack_size <= SMM_MINIMUM_STACK_SIZE || (stack_size & 3) != 0) {
		printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
		return -1;
	}

	const size_t total_stack_size = total_cpus * stack_size;
	if (total_stack_size >= perm_smram_size) {
		printk(BIOS_ERR, "%s: Stack won't fit smram\n", __func__);
		return -1;
	}
	stack_top = perm_smbase + total_stack_size;
	g_stack_size = stack_size;
	return 0;
}
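
/*
 * Illustration (made-up numbers): with total_cpus = 4 and a stack_size of
 * 0x400, the stacks occupy [perm_smbase, perm_smbase + 0x1000) and
 * stack_top = perm_smbase + 0x1000. Each CPU's stack then grows downward
 * from its slice of that region.
 */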

/*
 * Place the staggered entry points for each CPU. The entry points are
 * staggered by the per CPU SMM save state size extending down from
 * SMM_ENTRY_OFFSET.
 */
static void smm_stub_place_staggered_entry_points(const struct smm_loader_params *params)
{
	if (params->num_concurrent_save_states > 1)
		smm_place_entry_code(params->num_concurrent_save_states);
}

/*
 * The stub setup code assumes it is completely contained within the
 * default SMRAM size (0x10000) for the default SMI handler (entry at
 * 0x30000), but no assumption should be made for the permanent SMI handler.
 * The placement of CPU entry points for the permanent handler is determined
 * by the number of CPUs in the system and the amount of SMRAM.
 * There are potentially 2 regions to place
 * within the default SMRAM size:
 * 1. Save state areas
 * 2. Stub code
 *
 * The save state always lives at the top of each CPU's SMBASE segment (and
 * the entry point is at offset 0x8000). This allows only a certain number of
 * CPUs with staggered entry points until the save state area comes down far
 * enough to overwrite/corrupt the entry code (stub code). Therefore, an SMM
 * map is created to avoid this corruption, see smm_create_map() above.
 * This module setup code works for the default (0x30000) SMM handler setup
 * and the permanent SMM handler.
 * The CPU stack is decided at runtime in the stub and is treated as a
 * contiguous region. As this might not fit the default SMRAM region, the
 * same region used by the permanent handler can be used during relocation.
 */
static int smm_module_setup_stub(const uintptr_t smbase, const size_t smm_size,
				 struct smm_loader_params *params)
{
	struct rmodule smm_stub;
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
		return -1;
	}
	const size_t stub_size = rmodule_memory_size(&smm_stub);

	/* Some sanity check */
	if (stub_size >= SMM_ENTRY_OFFSET) {
		printk(BIOS_ERR, "%s: Stub too large\n", __func__);
		return -1;
	}

	const uintptr_t smm_stub_loc = smbase + SMM_ENTRY_OFFSET;
	if (rmodule_load((void *)smm_stub_loc, &smm_stub)) {
		printk(BIOS_ERR, "%s: load module failed\n", __func__);
		return -1;
	}

	struct smm_stub_params *stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = stack_top;
	stub_params->stack_size = g_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;
	stub_params->cr3 = params->cr3;

	/* This runs on the BSP. All the APs are its siblings */
	struct cpu_info *info = cpu_info();
	if (!info || !info->cpu) {
		printk(BIOS_ERR, "%s: Failed to find BSP struct device\n", __func__);
		return -1;
	}
	int i = 0;
	for (struct device *dev = info->cpu; dev; dev = dev->sibling)
		if (dev->enabled)
			stub_params->apic_id_to_cpu[i++] = dev->path.apic.initial_lapicid;

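	/*
	 * Illustration (hypothetical values): with 4 enabled CPUs whose
	 * initial LAPIC IDs are 0, 2, 4 and 6, the loop above yields
	 * apic_id_to_cpu = {0, 2, 4, 6}; in SMM each CPU finds its index
	 * (and thus its stack and entry point) by locating its own APIC ID
	 * in this table.
	 */
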
	if (i != params->num_cpus) {
		printk(BIOS_ERR, "%s: Failed to set up apic map correctly\n", __func__);
		return -1;
	}

	printk(BIOS_DEBUG, "%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
	printk(BIOS_DEBUG, "%s: per cpu stack_size = 0x%x\n", __func__,
	       stub_params->stack_size);
	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n", __func__, smm_size);

	smm_stub_place_staggered_entry_points(params);

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %lx. Will call %p\n", smm_stub_loc,
	       params->handler);
	return 0;
}

/*
 * smm_setup_relocation_handler assumes the callback is already loaded in
 * memory. i.e. Another SMM module isn't chained to the stub. The other
 * assumption is that the stub will be entered from the default SMRAM
 * location: 0x30000 -> 0x40000.
 */
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
	uintptr_t smram = SMM_DEFAULT_BASE;
	printk(BIOS_SPEW, "%s: enter\n", __func__);
	/* There can't be more than 1 concurrent save state for the relocation
	 * handler because all CPUs default to 0x30000 as SMBASE. */
	if (params->num_concurrent_save_states > 1)
		return -1;

	/* A handler has to be defined to call for relocation. */
	if (params->handler == NULL)
		return -1;

	/* Since the relocation handler always uses stack, adjust the number
	 * of concurrent stack users to be CONFIG_MAX_CPUS. */
	if (params->num_cpus == 0)
		params->num_cpus = CONFIG_MAX_CPUS;

	printk(BIOS_SPEW, "%s: exit\n", __func__);
	return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE, params);
}
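
/*
 * Usage sketch (illustrative, not part of this file): a caller doing SMBASE
 * relocation might fill in the parameters roughly like this, where
 * relocation_handler is a hypothetical C entry point supplied by the caller:
 *
 *	struct smm_loader_params params = {
 *		.num_concurrent_save_states = 1,
 *		.cpu_save_state_size = save_state_size,
 *		.num_cpus = num_cpus,
 *		.handler = relocation_handler,
 *	};
 *	if (smm_setup_relocation_handler(&params))
 *		die("SMM relocation setup failed\n");
 */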

static void setup_smihandler_params(struct smm_runtime *mod_params,
				    struct smm_loader_params *loader_params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);

	mod_params->smbase = tseg_base;
	mod_params->smm_size = tseg_size;
	mod_params->save_state_size = loader_params->cpu_save_state_size;
	mod_params->num_cpus = loader_params->num_cpus;
	mod_params->gnvs_ptr = (uint32_t)(uintptr_t)acpi_get_gnvs();
	const struct cbmem_entry *cbmemc;
	if (CONFIG(CONSOLE_CBMEM) && (cbmemc = cbmem_entry_find(CBMEM_ID_CONSOLE))) {
		mod_params->cbmemc = cbmem_entry_start(cbmemc);
		mod_params->cbmemc_size = cbmem_entry_size(cbmemc);
	} else {
		mod_params->cbmemc = 0;
		mod_params->cbmemc_size = 0;
	}

	for (int i = 0; i < loader_params->num_cpus; i++)
		mod_params->save_state_top[i] = region_end(&cpus[i].ss);

	if (CONFIG(RUNTIME_CONFIGURABLE_SMM_LOGLEVEL))
		mod_params->smm_log_level = mainboard_set_smm_log_level();
	else
		mod_params->smm_log_level = 0;

	if (CONFIG(SMM_PCI_RESOURCE_STORE))
		smm_pci_resource_store_init(mod_params);

	if (CONFIG(SMMSTORE_V2)) {
		struct smmstore_params_info info;
		if (smmstore_get_info(&info) < 0) {
			printk(BIOS_INFO, "SMMSTORE: Failed to get meta data\n");
			return;
		}

		void *ptr = cbmem_add(CBMEM_ID_SMM_COMBUFFER, info.block_size);
		if (!ptr) {
			printk(BIOS_ERR, "SMMSTORE: Failed to add com buffer\n");
			return;
		}
		mod_params->smmstore_com_buffer_base = (uintptr_t)ptr;
		mod_params->smmstore_com_buffer_size = info.block_size;
	}
}

static void print_region(const char *name, const struct region region)
{
	printk(BIOS_DEBUG, "%-12s [0x%zx-0x%zx]\n", name, region_offset(&region),
	       region_end(&region));
}

/* STM + Handler + (Stub + Save state) * CONFIG_MAX_CPUS + stacks + page tables */
#define SMM_REGIONS_ARRAY_SIZE (1 + 1 + CONFIG_MAX_CPUS * 2 + 1 + 1)

static int append_and_check_region(const struct region smram,
				   const struct region region,
				   struct region *region_list,
				   const char *name)
{
	unsigned int region_counter = 0;
	for (; region_counter < SMM_REGIONS_ARRAY_SIZE; region_counter++)
		if (region_sz(&region_list[region_counter]) == 0)
			break;

	if (region_counter >= SMM_REGIONS_ARRAY_SIZE) {
		printk(BIOS_ERR, "Array used to check regions too small\n");
		return 1;
	}

	if (!region_is_subregion(&smram, &region)) {
		printk(BIOS_ERR, "%s not in SMM\n", name);
		return 1;
	}

	print_region(name, region);
	for (unsigned int i = 0; i < region_counter; i++) {
		if (region_overlap(&region_list[i], &region)) {
			printk(BIOS_ERR, "%s overlaps with a previous region\n", name);
			return 1;
		}
	}

	region_list[region_counter] = region;

	return 0;
}

#define _PRES (1ULL << 0)
#define _RW   (1ULL << 1)
#define _US   (1ULL << 2)
#define _A    (1ULL << 5)
#define _D    (1ULL << 6)
#define _PS   (1ULL << 7)
#define _GEN_DIR(a) (_PRES + _RW + _US + _A + (a))
#define _GEN_PAGE(a) (_PRES + _RW + _US + _PS + _A + _D + (a))
#define PAGE_SIZE 8
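
/*
 * Note: PAGE_SIZE here is the size of one 64-bit page table entry (8 bytes),
 * not the size of a memory page. As an example of the macros above,
 * _GEN_PAGE(1ULL * GiB) produces a present, writable, accessed and dirty
 * large-page entry whose physical base is 1 GiB; installed as the second
 * PDPT entry below, it maps the range [1 GiB, 2 GiB).
 */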

/* Return the PM4LE */
static uintptr_t install_page_table(const uintptr_t handler_base)
{
	const bool one_g_pages = !!(cpuid_edx(0x80000001) & (1 << 26));
	/* 4 1G pages or 4 PDPE entries with 512 * 2M pages */
	const size_t pages_needed = one_g_pages ? 4 : 2048 + 4;
	const uintptr_t pages_base = ALIGN_DOWN(handler_base - pages_needed * PAGE_SIZE, 4096);
	const uintptr_t pm4le = ALIGN_DOWN(pages_base - 8, 4096);

	if (one_g_pages) {
		for (size_t i = 0; i < 4; i++)
			write64p(pages_base + i * PAGE_SIZE, _GEN_PAGE(1ull * GiB * i));
		write64p(pm4le, _GEN_DIR(pages_base));
	} else {
		for (size_t i = 0; i < 2048; i++)
			write64p(pages_base + i * PAGE_SIZE, _GEN_PAGE(2ull * MiB * i));
		write64p(pm4le, _GEN_DIR(pages_base + 2048 * PAGE_SIZE));
		for (size_t i = 0; i < 4; i++)
			write64p(pages_base + (2048 + i) * PAGE_SIZE, _GEN_DIR(pages_base + 4096 * i));
	}
	return pm4le;
}
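
/*
 * Sizing illustration derived from the code above: with 1 GiB pages the
 * allocation is 4 PDPT entries (4 * 8 bytes) plus one PML4 entry; without
 * them it is 2048 page directory entries mapping 2 MiB each (covering the
 * same 4 GiB) plus 4 PDPT entries and one PML4 entry. Everything is placed
 * just below handler_base, aligned down to 4 KiB.
 */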

/*
 * The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size
 * | BIOS resource   |
 * | list (STM)      |
 * +-----------------+
 * |  smi handler    |
 * |      ...        |
 * +-----------------+
 * |  page tables    |
 * +-----------------+ <- cpu0
 * |    stub code    | <- cpu1
 * |    stub code    | <- cpu2
 * |    stub code    | <- cpu3, etc
 * |                 |
 * |                 |
 * |                 |
 * |    stacks       |
 * +-----------------+ <- smram start
 *
 * With CONFIG(SMM_TSEG) the stubs will be placed in the same segment as the
 * permanent handler and the stacks.
 */
int smm_load_module(const uintptr_t smram_base, const size_t smram_size,
		    struct smm_loader_params *params)
{
	/*
	 * Place in .bss to reduce stack usage.
	 * TODO: once CPU_INFO_V2 is used everywhere, use smaller stack for APs and move
	 * this back to the BSP stack.
	 */
	static struct region region_list[SMM_REGIONS_ARRAY_SIZE] = {};

	struct rmodule smi_handler;
	if (rmodule_parse(&_binary_smm_start, &smi_handler))
		return -1;

	const struct region smram = { .offset = smram_base, .size = smram_size };
	const uintptr_t smram_top = region_end(&smram);

	const size_t stm_size =
		CONFIG(STM) ? CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE : 0;

	if (CONFIG(STM)) {
		struct region stm = {};
		stm.offset = smram_top - stm_size;
		stm.size = stm_size;
		if (append_and_check_region(smram, stm, region_list, "STM"))
			return -1;
		printk(BIOS_DEBUG, "MSEG size     0x%x\n", CONFIG_MSEG_SIZE);
		printk(BIOS_DEBUG, "BIOS res list 0x%x\n", CONFIG_BIOS_RESOURCE_LIST_SIZE);
	}

	const size_t handler_size = rmodule_memory_size(&smi_handler);
	const size_t handler_alignment = rmodule_load_alignment(&smi_handler);
	const uintptr_t handler_base =
		ALIGN_DOWN(smram_top - stm_size - handler_size,
			   handler_alignment);
	struct region handler = {
		.offset = handler_base,
		.size = handler_size
	};
	if (append_and_check_region(smram, handler, region_list, "HANDLER"))
		return -1;

	uintptr_t stub_segment_base;
	if (ENV_X86_64) {
		uintptr_t pt_base = install_page_table(handler_base);
		struct region page_tables = {
			.offset = pt_base,
			.size = handler_base - pt_base,
		};
		if (append_and_check_region(smram, page_tables, region_list, "PAGE TABLES"))
			return -1;
		params->cr3 = pt_base;
		stub_segment_base = pt_base - SMM_CODE_SEGMENT_SIZE;
	} else {
		stub_segment_base = handler_base - SMM_CODE_SEGMENT_SIZE;
	}

	if (!smm_create_map(stub_segment_base, params->num_concurrent_save_states, params)) {
		printk(BIOS_ERR, "%s: Error creating CPU map\n", __func__);
		return -1;
	}
	for (unsigned int i = 0; i < params->num_concurrent_save_states; i++) {
		printk(BIOS_DEBUG, "\nCPU %u\n", i);
		char string[13];
		snprintf(string, sizeof(string), "  ss%d", i);
		if (append_and_check_region(smram, cpus[i].ss, region_list, string))
			return -1;
		snprintf(string, sizeof(string), "  stub%d", i);
		if (append_and_check_region(smram, cpus[i].stub_code, region_list, string))
			return -1;
	}

	struct region stacks = {
		.offset = smram_base,
		.size = params->num_concurrent_save_states * CONFIG_SMM_MODULE_STACK_SIZE
	};
	printk(BIOS_DEBUG, "\n");
	if (append_and_check_region(smram, stacks, region_list, "stacks"))
		return -1;

	if (rmodule_load((void *)handler_base, &smi_handler))
		return -1;

	struct smm_runtime *smihandler_params = rmodule_parameters(&smi_handler);
	params->handler = rmodule_entry(&smi_handler);
	setup_smihandler_params(smihandler_params, params);

	return smm_module_setup_stub(stub_segment_base, smram_size, params);
}
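
/*
 * Usage sketch (illustrative; the exact call site lives in the platform
 * code, not in this file): the permanent handler is typically installed
 * with something roughly like the following, where smm_region() supplies
 * the TSEG window:
 *
 *	uintptr_t base;
 *	size_t size;
 *	struct smm_loader_params params = {
 *		.num_cpus = num_cpus,
 *		.num_concurrent_save_states = num_cpus,
 *		.cpu_save_state_size = save_state_size,
 *	};
 *	smm_region(&base, &size);
 *	smm_setup_stack(base, size, num_cpus, CONFIG_SMM_MODULE_STACK_SIZE);
 *	if (smm_load_module(base, size, &params))
 *		die("Failed to load SMM module\n");
 */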