1OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
2OUTPUT_ARCH(aarch64)
3
4_start_phys = _start - %KERNEL_BASE% + %MEMBASE%;
5ENTRY(_start_phys)
6
7SECTIONS
8{
9    . = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%;
10
11    /*
12     * LLVM introduced a static constructor, init_have_lse_atomics, to
13     * compiler-rt that we don't want and that breaks our build. Until we compile
14     * our own compiler-rt and either provide getauxval and enable CFI, or remove
15     * this function, we can remove it during linking here
16     */
17    /DISCARD/ : {
18        *libclang_rt.builtins-aarch64-android.a:cpu_model.c.o(
19            .text.init_cpu_features
20            .text.init_have_lse_atomics
21            .init_array*
22        )
23
24        *libclang_rt.builtins-aarch64-android.a:aarch64.c.o(
25            .text.__init_cpu_features
26            .text.init_have_lse_atomics
27            .init_array*
28        )
29    }
30
31    /* text/read-only data */
32    /* set the load address to physical MEMBASE */
33    .text : AT(%MEMBASE% + %KERNEL_LOAD_OFFSET%) {
34        __code_start = .;
35        KEEP(*(.text.boot))
36        KEEP(*(.text.boot.early.vectab))
37        KEEP(*(.text.boot.vectab))
38        *(.text* .sram.text.glue_7* .gnu.linkonce.t.*)
39    }
40
41    .interp : { *(.interp) }
42    .hash : { *(.hash) }
43    .dynsym : { *(.dynsym) }
44    .dynstr : { *(.dynstr) }
45    .init : { *(.init) } =0x9090
46    .plt : { *(.plt) }
47
48    /*
49     * .plt needs to be empty because its entries might call into the dynamic
50     * loader, which doesn't exist for Trusty (or any kernel).
51     */
52    ASSERT(SIZEOF(.plt) == 0, ".plt section should be empty")
53
54    /* .ARM.exidx is sorted, so has to go in its own output section.  */
55    __exidx_start = .;
56    .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
57    __exidx_end = .;
58
59    .fake_post_text : {
60	    __code_end = .;
61    }
62
63    .rodata : ALIGN(4096) {
64        __rodata_start = .;
65        __fault_handler_table_start = .;
66        KEEP(*(.rodata.fault_handler_table))
67        __fault_handler_table_end = .;
68        *(.rodata .rodata.* .gnu.linkonce.r.*)
69    }
70
71    .rel.dyn : {
72        *(.rel.text) *(.rel.gnu.linkonce.t*)
73        *(.rel.init)
74        *(.rel.plt)
75        *(.rel.rodata) *(.rel.gnu.linkonce.r*)
76        *(.rel.lk_init)
77        *(.rel.apps)
78        *(.rel.drivers)
79        *(.rel.data) *(.rel.gnu.linkonce.d*)
80        *(.rel.devices)
81        *(.rel.ctors)
82        *(.rel.dtors)
83        *(.rel.got)
84        *(.rel.bss) *(.rel.bss.*) *(.rel.gnu.linkonce.b*)
85    }
86
87    .rela.dyn : {
88        *(.rela.text) *(.rela.gnu.linkonce.t*)
89        *(.rela.init)
90        *(.rela.plt)
91        *(.rela.rodata) *(.rela.gnu.linkonce.r*)
92        *(.rela.lk_init)
93        *(.rela.apps)
94        *(.rela.drivers)
95        *(.rela.data) *(.rela.gnu.linkonce.d*)
96        *(.rela.devices)
97        *(.rela.ctors)
98        *(.rela.dtors)
99        *(.rela.got)
100        *(.rela.bss) *(.rela.bss.*) *(.rela.gnu.linkonce.b*)
101    }
102
103    /*
104     * Non-RELR dynamic relocations are not implemented yet.
105     * .rel.dyn should not ever appear on AArch64 anyway.
106     */
107    ASSERT(SIZEOF(.rel.dyn) == 0, "Found non-RELR relocations in .rel.dyn")
108    ASSERT(SIZEOF(.rela.dyn) == 0, "Found non-RELR relocations in .rel.dyn")
109
110    .relr.dyn : ALIGN(8) {
111        __relr_start = .;
112        *(.relr.dyn)
113        __relr_end = .;
114    }
115
116    .ctors : ALIGN(8) {
117        __ctor_list = .;
118        KEEP(*(.ctors .init_array))
119        __ctor_end = .;
120    }
121    .dtors : ALIGN(8) {
122        __dtor_list = .;
123        KEEP(*(.dtors .fini_array))
124        __dtor_end = .;
125    }
126
127    /*
128     * .got and .dynamic need to follow .ctors and .dtors because the linker
129     * puts them all in the RELRO segment and wants them contiguous
130     */
131    .dynamic : { *(.dynamic) }
132    .got : { *(.got.plt) *(.got) }
133
134    /*
135     * extra linker scripts tend to insert sections just after .rodata,
136     * so we want to make sure this symbol comes after anything inserted above,
137     * but not aligned to the next section necessarily.
138     */
139    .fake_post_rodata : {
140        __rodata_end = .;
141    }
142
143    .data : ALIGN(4096) {
144        /* writable data  */
145        __data_start_rom = .;
146        /* in one segment binaries, the rom data address is on top of the ram data address */
147        __data_start = .;
148        *(.data .data.* .gnu.linkonce.d.*)
149    }
150
151    /*
152     * extra linker scripts tend to insert sections just after .data,
153     * so we want to make sure this symbol comes after anything inserted above,
154     * but not aligned to the next section necessarily.
155     */
156    .fake_post_data : {
157        __data_end = .;
158    }
159
160    /* unintialized data (in same segment as writable data) */
161    .bss : ALIGN(4096) {
162        __bss_start = .;
163        KEEP(*(.bss.prebss.*))
164        . = ALIGN(8);
165        __post_prebss_bss_start = .;
166        *(.bss .bss.*)
167        *(.gnu.linkonce.b.*)
168        *(COMMON)
169        . = ALIGN(8);
170        __bss_end = .;
171    }
172
173    /* Align the end to ensure anything after the kernel ends up on its own pages */
174    . = ALIGN(4096);
175    _end = .;
176
177    . = %KERNEL_BASE% + %MEMSIZE%;
178    _end_of_ram = .;
179
180    /* Strip unnecessary stuff */
181    /DISCARD/ : { *(.comment .note .eh_frame) }
182}
183