/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * GNU ld linker script for the BL31 image. The file is fed through the C
 * preprocessor before linking, which is why #include / #if directives appear
 * alongside linker-script syntax.
 *
 * NOTE(review): section macros such as RODATA_COMMON, DATA_SECTION,
 * RELA_SECTION, STACK_SECTION, BSS_SECTION and XLAT_TABLE_SECTION are
 * presumably provided by <common/bl_common.ld.h> -- confirm against that
 * header.
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)

MEMORY {
    /* Primary region that receives the loaded image (code + data). */
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE

#if SEPARATE_NOBITS_REGION
    /*
     * Optional separate region for content that occupies no space in the
     * image file (stacks, BSS, translation tables, coherent memory).
     */
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else /* SEPARATE_NOBITS_REGION */
    /* No dedicated region: NOBITS output sections are simply placed in RAM. */
# define NOBITS RAM
#endif /* SEPARATE_NOBITS_REGION */
}

/* Hook allowing platforms to contribute an extra linker-script fragment. */
#ifdef PLAT_EXTRA_LD_SCRIPT
# include <plat.ld.S>
#endif /* PLAT_EXTRA_LD_SCRIPT */

SECTIONS {
    RAM_REGION_START = ORIGIN(RAM);
    RAM_REGION_LENGTH = LENGTH(RAM);
    . = BL31_BASE;

    /* Page alignment of the image base is required for MMU permissions. */
    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data live in distinct, page-aligned output sections
     * so they can be mapped with different permissions (executable vs
     * read-only).
     */
    .text . : {
        ASSERT(. == ALIGN(PAGE_SIZE),
            ".text is not aligned on a page boundary.");

        __TEXT_START__ = .;

        /* Keep the entry point object's code first in the image. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(SORT(.text*)))
        *(.vectors)
        __TEXT_END_UNALIGNED__ = .;

        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(SORT_BY_ALIGNMENT(.rodata*))

# if PLAT_EXTRA_RODATA_INCLUDES
# include <plat.ld.rodata.inc>
# endif /* PLAT_EXTRA_RODATA_INCLUDES */

        RODATA_COMMON

        . = ALIGN(8);

        /* Pulls in the pubsub event descriptors (read-only data). */
# include <lib/el3_runtime/pubsub_events.h>
        __RODATA_END_UNALIGNED__ = .;

        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    /* Combined read-only section holding both code and rodata. */
    .ro . : {
        ASSERT(. == ALIGN(PAGE_SIZE),
            ".ro is not aligned on a page boundary.");

        __RO_START__ = .;

        /* Entry point code first, then the remaining code and rodata. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        . = ALIGN(8);

# include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in. Ensure
         * that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

    /* Sanity check: at least one cpu_ops descriptor must have been linked. */
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
        "cpu_ops not defined for this platform.")

#if SPM_MM || (SPMC_AT_EL3 && SPMC_AT_EL3_SEL0_SP)
# ifndef SPM_SHIM_EXCEPTIONS_VMA
# define SPM_SHIM_EXCEPTIONS_VMA RAM
# endif /* SPM_SHIM_EXCEPTIONS_VMA */

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address but we need to place them in a separate page so that we can set
     * individual permissions on them, so the actual alignment needed is the
     * page size.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;

        *(.spm_shim_exceptions)

        . = ALIGN(PAGE_SIZE);

        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));

    /*
     * Resume placement after the shim's load address in RAM (its VMA may be
     * in a different region).
     */
    . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
#endif /* SPM_MM || (SPMC_AT_EL3 && SPMC_AT_EL3_SEL0_SP) */

    __RW_START__ = .;

    DATA_SECTION >RAM
    RELA_SECTION >RAM

#ifdef BL31_PROGBITS_LIMIT
    /* Optional platform cap on the size of the loaded (progbits) content. */
    ASSERT(
        . <= BL31_PROGBITS_LIMIT,
        "BL31 progbits has exceeded its limit. Consider disabling some features."
    )
#endif /* BL31_PROGBITS_LIMIT */

#if SEPARATE_NOBITS_REGION
    . = ALIGN(PAGE_SIZE);

    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    /* Switch the location counter to the dedicated NOBITS region. */
    . = BL31_NOBITS_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif /* SEPARATE_NOBITS_REGION */

    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;

        /*
         * Bakery locks are stored in coherent memory. Each lock's data is
         * contiguous and fully allocated by the compiler.
         */
        *(.bakery_lock)
        *(.tzfw_coherent_mem)

        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must creep in. Ensure the rest of
         * the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif /* USE_COHERENT_MEM */

#if SEPARATE_NOBITS_REGION
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else /* SEPARATE_NOBITS_REGION */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif /* SEPARATE_NOBITS_REGION */
    RAM_REGION_END = .;

    /* Dynamic-linking metadata is not needed at runtime; discard it. */
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }
}