xref: /linux/arch/arm64/kernel/vmlinux.lds.S (revision 2c739ced5886cd8c8361faa79a9522ec05174ed0)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#define RO_EXCEPTION_TABLE_ALIGN	8
#define RUNTIME_DISCARD_EXIT
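/*
 * Both knobs above are consumed by <asm-generic/vmlinux.lds.h>:
 * RO_EXCEPTION_TABLE_ALIGN places the exception table inside the
 * read-only data segment with 8-byte entry alignment, and
 * RUNTIME_DISCARD_EXIT keeps the .exit.* sections in the image so they
 * can be discarded at runtime rather than thrown away at link time.
 */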

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "image.h"

OUTPUT_ARCH(aarch64)
ENTRY(_text)

jiffies = jiffies_64;
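/*
 * The alias above exposes the 64-bit jiffies_64 counter under the
 * generic 'jiffies' name; both symbols are 64 bits wide on arm64, so a
 * plain alias works without the byte-offset fixup 32-bit ports need.
 */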


#define HYPERVISOR_EXTABLE					\
	. = ALIGN(SZ_8);					\
	__start___kvm_ex_table = .;				\
	*(__kvm_ex_table)					\
	__stop___kvm_ex_table = .;
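/*
 * __start/__stop___kvm_ex_table bound the fixup records that the KVM
 * hyp code emits into __kvm_ex_table; they give the EL2 code its own
 * exception table, searched much like the kernel's __ex_table when a
 * hyp-mode access faults.
 */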

#define HYPERVISOR_TEXT					\
	/*						\
	 * Align to 4 KB so that			\
	 * a) the HYP vector table is at its minimum	\
	 *    alignment of 2048 bytes			\
	 * b) the HYP init code will not cross a page	\
	 *    boundary if its size does not exceed	\
	 *    4 KB (see related ASSERT() below)		\
	 */						\
	. = ALIGN(SZ_4K);				\
	__hyp_idmap_text_start = .;			\
	*(.hyp.idmap.text)				\
	__hyp_idmap_text_end = .;			\
	__hyp_text_start = .;				\
	*(.hyp.text)					\
	HYPERVISOR_EXTABLE				\
	__hyp_text_end = .;
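/*
 * The __hyp_*_text_{start,end} markers delimit the EL2 object (hyp
 * idmap text, hyp text and its extable); KVM uses them at init time to
 * map this code into the hyp address space.
 */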

#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	__idmap_text_start = .;				\
	*(.idmap.text)					\
	__idmap_text_end = .;
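/*
 * .idmap.text collects code that must run from an identity (VA == PA)
 * mapping, e.g. while the MMU is being turned on or off or a CPU is
 * being brought up; the start/end symbols are used when the idmap page
 * tables are populated.
 */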

#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT					\
	. = ALIGN(SZ_4K);				\
	__hibernate_exit_text_start = .;		\
	*(.hibernate_exit.text)				\
	__hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif
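/*
 * The hibernate exit code is copied to a spare page and executed from
 * there on resume, so it must fit in a single page and must not
 * straddle a page boundary (enforced by the ASSERT() at the end of
 * this file).
 */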

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_start = .;			\
	*(.entry.tramp.text)				\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_end = .;
#else
#define TRAMP_TEXT
#endif
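/*
 * With CONFIG_UNMAP_KERNEL_AT_EL0 (kpti) the entry trampoline is the
 * only kernel text left mapped while user space runs; it is remapped at
 * a fixed fixmap address, which is why the section is padded to exactly
 * one page (see the PAGE_SIZE ASSERT() at the end of this file).
 */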

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from _stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. '_stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif
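/*
 * When CONFIG_EFI is set the Image doubles as a PE/COFF binary, and the
 * __pecoff_data_rawsize/__pecoff_data_size symbols computed later in
 * this script feed the section sizes recorded in the PE header
 * (arch/arm64/kernel/efi-header.S). The BYTE(0) keeps the padding
 * section non-empty so the linker actually emits the alignment padding
 * into the file.
 */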

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 */
	DISCARDS
	/DISCARD/ : {
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash .gnu.hash)
	}
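	/*
	 * When built relocatable, vmlinux is linked -pie and the linker
	 * emits the dynamic sections discarded above; the kernel applies
	 * its own relocations at boot, so everything except the .rela.dyn
	 * (and optional .relr.dyn) sections further down can be dropped.
	 */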

	. = KIMAGE_VADDR;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			ENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			CPUIDLE_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			HYPERVISOR_TEXT
			IDMAP_TEXT
			HIBERNATE_TEXT
			TRAMP_TEXT
			*(.fixup)
			*(.gnu.warning)
		. = ALIGN(16);
		*(.got)			/* Global offset table		*/
	}

	/*
	 * Make sure that the .got.plt is either completely empty or
	 * contains only the three reserved entries used for lazy dispatch
	 * (3 x 8 bytes == 0x18); vmlinux must never contain real PLT
	 * slots.
	 */
	.got.plt : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
	       "Unexpected GOT/PLT entries detected!")

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	/* everything from this point to __init_begin will be marked RO NX */
	RO_DATA(PAGE_SIZE)

	idmap_pg_dir = .;
	. += IDMAP_DIR_SIZE;
	idmap_pg_end = .;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	reserved_ttbr0 = .;
	. += RESERVED_TTBR0_SIZE;
#endif
	swapper_pg_dir = .;
	. += PAGE_SIZE;
	swapper_pg_end = .;
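	/*
	 * The reservations above carve the fixed kernel page tables out of
	 * the read-only part of the image: the identity-map tables, the
	 * optional kpti trampoline table, the optional reserved TTBR0 area
	 * used by software PAN, and the swapper (kernel) page-table root.
	 * The sizes come from headers included above, such as
	 * asm/kernel-pgtable.h.
	 */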

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;
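	/*
	 * Everything from __init_begin up to __init_end is freed once
	 * booting is complete; the SEGMENT_ALIGN padding lets the init
	 * region be unmapped and have its permissions changed
	 * independently of the surrounding sections.
	 */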

	INIT_TEXT_SECTION(8)

	__exittext_begin = .;
	.exit.text : {
		EXIT_TEXT
	}
	__exittext_end = .;

	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
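	/*
	 * .altinstructions holds the alt_instr records generated by the
	 * ALTERNATIVE() macros; they are walked at boot to patch code
	 * sequences according to the detected CPU features and are not
	 * needed afterwards, hence their place in the init region.
	 */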

	. = ALIGN(SEGMENT_ALIGN);
	__inittext_end = .;
	__initdata_begin = .;

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
		*(.init.rodata.* .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		EXIT_DATA
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	.rela.dyn : ALIGN(8) {
		*(.rela .rela*)
	}

	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela.dyn);

#ifdef CONFIG_RELR
	.relr.dyn : ALIGN(8) {
		*(.relr.dyn)
	}

	__relr_offset	= ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
	__relr_size	= SIZEOF(.relr.dyn);
#endif
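	/*
	 * The __rela_* (and, with CONFIG_RELR, __relr_*) symbols describe
	 * each relocation section as an offset from KIMAGE_VADDR plus a
	 * byte size; the early boot code uses them to apply the
	 * R_AARCH64_RELATIVE relocations before C code runs, which is what
	 * makes the relocatable/KASLR image work.
	 */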

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}
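	/*
	 * SZ_2K corresponds to the largest Cache Writeback Granule the
	 * architecture permits, so each of the two mmuoff sections above
	 * sits in CWG-aligned slots of its own regardless of the CPU's
	 * actual CWG.
	 */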

	PECOFF_EDATA_PADDING
	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
	_edata = .;
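	/*
	 * __pecoff_data_rawsize is the on-disk size of the PE .data region
	 * (initialized data only, up to _edata), while __pecoff_data_size
	 * below also covers the BSS and the initial page tables, i.e. the
	 * region's size in memory.
	 */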

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	init_pg_dir = .;
	. += INIT_DIR_SIZE;
	init_pg_end = .;

	. = ALIGN(SEGMENT_ALIGN);
	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	HEAD_SYMBOLS

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.plt : {
		*(.plt) *(.plt.*) *(.iplt) *(.igot)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.data.rel.ro : { *(.data.rel.ro) }
	ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
}

#include "image-vars.h"
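/*
 * image-vars.h provides the symbol aliases (e.g. the __efistub_-prefixed
 * ones used by the EFI stub) that let code built outside the core kernel
 * image link against selected kernel symbols.
 */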

/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
	<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
	"Entry trampoline text too big")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")