/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the BL32 (SP_MIN) image on AArch32.
 *
 * The whole image lives in a single RAM region spanning BL32_BASE to
 * BL32_LIMIT. The script exports the __*_START__/__*_END__ pairs that the
 * runtime uses to map each region (code, read-only data, read-write data,
 * coherent memory) with the appropriate memory attributes, which is why
 * several section boundaries are forced to PAGE_SIZE alignment.
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

MEMORY {
	RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}

/* Platforms may contribute extra output sections via their own fragment. */
#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
#   include <plat_sp_min.ld.S>
#endif /* PLAT_SP_MIN_EXTRA_LD_SCRIPT */

SECTIONS {
	RAM_REGION_START = ORIGIN(RAM);
	RAM_REGION_LENGTH = LENGTH(RAM);
	. = BL32_BASE;

	ASSERT(. == ALIGN(PAGE_SIZE),
		"BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
	.text . : {
		ASSERT(. == ALIGN(PAGE_SIZE),
			".text address is not aligned on a page boundary.");

		__TEXT_START__ = .;

		/* The entrypoint object is placed first in the image. */
		*entrypoint.o(.text*)
		*(SORT_BY_ALIGNMENT(.text*))
		*(.vectors)
		__TEXT_END_UNALIGNED__ = .;

		. = ALIGN(PAGE_SIZE);

		__TEXT_END__ = .;
	} >RAM

	/* .ARM.extab and .ARM.exidx are only added because Clang needs them */
	.ARM.extab . : {
		*(.ARM.extab* .gnu.linkonce.armextab.*)
	} >RAM

	.ARM.exidx . : {
		*(.ARM.exidx* .gnu.linkonce.armexidx.*)
	} >RAM

	.rodata . : {
		__RODATA_START__ = .;
		*(SORT_BY_ALIGNMENT(.rodata*))

		RODATA_COMMON

		. = ALIGN(8);

		/* Pulls in the pubsub event symbol definitions. */
#   include <lib/el3_runtime/pubsub_events.h>
		__RODATA_END_UNALIGNED__ = .;

		. = ALIGN(PAGE_SIZE);

		__RODATA_END__ = .;
	} >RAM
#else /* SEPARATE_CODE_AND_RODATA */
	.ro . : {
		ASSERT(. == ALIGN(PAGE_SIZE),
			".ro address is not aligned on a page boundary.");

		__RO_START__ = .;

		/* The entrypoint object is placed first in the image. */
		*entrypoint.o(.text*)
		*(SORT_BY_ALIGNMENT(.text*))
		*(SORT_BY_ALIGNMENT(.rodata*))

		RODATA_COMMON

		. = ALIGN(8);

		/* Pulls in the pubsub event symbol definitions. */
#   include <lib/el3_runtime/pubsub_events.h>

		*(.vectors)

		__RO_END_UNALIGNED__ = .;

		/*
		 * Memory page(s) mapped to this section will be marked as device
		 * memory. No other unexpected data must creep in. Ensure that the rest
		 * of the current memory page is unused.
		 */
		. = ALIGN(PAGE_SIZE);

		__RO_END__ = .;
	} >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

	/* cpu_ops must be defined or the image cannot initialise its CPU. */
	ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
		"cpu_ops not defined for this platform.")

	__RW_START__ = .;

	DATA_SECTION >RAM
	RELA_SECTION >RAM

#ifdef BL32_PROGBITS_LIMIT
	ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif /* BL32_PROGBITS_LIMIT */

	STACK_SECTION >RAM
	BSS_SECTION >RAM
	XLAT_TABLE_SECTION >RAM

	__BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
	/*
	 * The base address of the coherent memory section must be page-aligned to
	 * guarantee that the coherent data are stored on their own pages and are
	 * not mixed with normal data. This is required to set up the correct
	 * memory attributes for the coherent data page tables.
	 */
	.coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
		__COHERENT_RAM_START__ = .;

		/*
		 * Bakery locks are stored in coherent memory. Each lock's data is
		 * contiguous and fully allocated by the compiler.
		 */
		*(.bakery_lock)
		*(.tzfw_coherent_mem)

		__COHERENT_RAM_END_UNALIGNED__ = .;

		/*
		 * Memory page(s) mapped to this section will be marked as device
		 * memory. No other unexpected data must creep in. Ensure that the rest
		 * of the current memory page is unused.
		 */
		. = ALIGN(PAGE_SIZE);

		__COHERENT_RAM_END__ = .;
	} >RAM

	__COHERENT_RAM_UNALIGNED_SIZE__ =
		__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif /* USE_COHERENT_MEM */

	__RW_END__ = .;
	__BL32_END__ = .;

	/* Dynamic-linking metadata is never needed in the firmware image. */
	/DISCARD/ : {
		*(.dynsym .dynstr .hash .gnu.hash)
	}

	ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
	RAM_REGION_END = .;
}