/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

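/*
 * Handle to the secure transfer list passed from BL2 (only populated when
 * TRANSFER_LIST is enabled).
 */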
static struct transfer_list_header *secure_tl __unused;
/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#endif

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in a specific ARM standard platform */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

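/*
 * Map the whole of BL31 as read-write memory in the EL3 PAS; the code and
 * read-only data are also described by the stricter ARM_MAP_BL_RO region used
 * in arm_bl31_plat_arch_setup() below.
 */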
#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

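/*
 * Round the unaligned linker symbols up to the next page boundary so that
 * the attribute changes in arm_free_init_memory() operate on whole pages.
 */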
#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	if (type == NON_SECURE) {
		next_image_info = &bl33_image_ep_info;
	}
#if ENABLE_RME
	else if (type == REALM) {
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
		next_image_info = &bl32_image_ep_info;
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are potentially lost. This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
#if TRANSFER_LIST
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute subsequent
	 * images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
}
#else
void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config,
				uintptr_t hw_config, void *plat_params_from_bl2)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if RESET_TO_BL31
	/* There are no parameters from BL2 if BL31 is a reset vector */
	assert(from_bl2 == NULL);
	assert(plat_params_from_bl2 == NULL);

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();

#if defined(SPD_spmd)
	/*
	 * SPM (Hafnium in the secure world) expects the SPM Core manifest base
	 * address in x0. In the !RESET_TO_BL31 case the manifest is loaded
	 * just past the base of non-shared SRAM (at a 4KB offset into SRAM),
	 * but in the RESET_TO_BL31 case all non-shared SRAM is allocated to
	 * BL31, so keep the manifest in the last page to avoid overwriting it.
	 */
	bl32_image_ep_info.args.arg0 = ARM_TRUSTED_SRAM_BASE +
				PLAT_ARM_TRUSTED_SRAM_SIZE - PAGE_SIZE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	/*
	 * Tell BL31 where the non-trusted software image is located and what
	 * its entry state is.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */

#else /* RESET_TO_BL31 */

	/*
	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
	assert(((unsigned long long)plat_params_from_bl2) ==
		ARM_BL31_PLAT_PARAM_VAL);
	/*
	 * Check that the params passed from BL2 are not NULL.
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy the BL33, BL32 and RMM (if present) entry point information.
	 * It is stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update the SP
			 * manifest with the actual SP base address and size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

# if ARM_LINUX_KERNEL_AS_BL33
	/*
	 * According to the file ``Documentation/arm64/booting.txt`` of the
	 * Linux kernel tree, Linux expects the physical address of the device
	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
	 * must be 0.
	 * This option is also repurposed to load the Hafnium hypervisor in the
	 * normal world, which expects its manifest address in x0. The manifest
	 * is essentially the Linux DTS (passed to the primary VM) extended
	 * with 'hypervisor' and 'chosen' nodes specifying the hypervisor
	 * configuration.
	 */
#if RESET_TO_BL31
	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
#else
	bl33_image_ep_info.args.arg0 = (u_register_t)hw_config;
#endif
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
# endif
}
#endif

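/*******************************************************************************
 * Default (weak) implementation of the early platform setup entry point: pass
 * the cold boot arguments on to the Arm common helper above, then initialize
 * the interconnect for the primary CPU's cluster.
 ******************************************************************************/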
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
		u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);
#else
	arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
#endif

	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	/* Initialize the GIC driver, cpu and distributor interfaces */
	plat_arm_gic_driver_init();
	plat_arm_gic_init();

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
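	/*
	 * CNTCR_EN enables the counter module; CNTCR_FCREQ(0U) requests entry
	 * 0 (the base frequency) of the counter's frequency modes table.
	 */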
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
			CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
	ras_init();
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	/* Initialize the runtime console */
	arm_console_runtime_init();

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make memory for image boot time code RW to reclaim it as stack for the
 * secondary cores, or RO where it cannot be reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  -----------------------------------------
 * |  CORE 0  |  CORE 1  |  CORE 2  | EXTRA  |
 * |  STACK   |  STACK   |  STACK   | SPACE  |
 *  -----------------------------------------
 *             <-------------------> <------>
 *                MAKE RW AND XN       MAKE
 *                  FOR STACKS       RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code\n");
		panic();
	}
}
#endif

void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

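	/*
	 * Combine BL31's own regions above with the platform's memory map,
	 * build the translation tables and then enable the MMU at EL3.
	 */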
	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 */
	if (gpt_runtime_init() < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

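/*******************************************************************************
 * Default (weak) architectural setup: perform the common Arm setup and, when
 * a transfer list is in use, make the HW_CONFIG device tree available to
 * fconf.
 ******************************************************************************/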
void __init bl31_plat_arch_setup(void)
{
	struct transfer_list_entry *te __unused;

	arm_bl31_plat_arch_setup();

#if TRANSFER_LIST && !RESET_TO_BL2
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/* Populate HW_CONFIG device tree with the mapped address */
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif
}