/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm/hyp_image.h>
#ifdef CONFIG_KVM
#define HYPERVISOR_EXTABLE					\
	. = ALIGN(SZ_8);					\
	__start___kvm_ex_table = .;				\
	*(__kvm_ex_table)					\
	__stop___kvm_ex_table = .;

#define HYPERVISOR_DATA_SECTIONS				\
	HYP_SECTION_NAME(.rodata) : {				\
		. = ALIGN(PAGE_SIZE);				\
		__hyp_rodata_start = .;				\
		*(HYP_SECTION_NAME(.data..ro_after_init))	\
		*(HYP_SECTION_NAME(.rodata))			\
		. = ALIGN(PAGE_SIZE);				\
		__hyp_rodata_end = .;				\
	}

#define HYPERVISOR_PERCPU_SECTION				\
	. = ALIGN(PAGE_SIZE);					\
	HYP_SECTION_NAME(.data..percpu) : {			\
		*(HYP_SECTION_NAME(.data..percpu))		\
	}

#define HYPERVISOR_RELOC_SECTION				\
	.hyp.reloc : ALIGN(4) {					\
		__hyp_reloc_begin = .;				\
		*(.hyp.reloc)					\
		__hyp_reloc_end = .;				\
	}

#define BSS_FIRST_SECTIONS					\
	__hyp_bss_start = .;					\
	*(HYP_SECTION_NAME(.bss))				\
	. = ALIGN(PAGE_SIZE);					\
	__hyp_bss_end = .;
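
/*
 * The hypervisor's zero-initialised data is emitted at the very start of the
 * kernel .bss (via the generic BSS_FIRST_SECTIONS hook) and padded out to a
 * page boundary, so that it forms a self-contained, page-aligned region that
 * the EL2 code can handle separately from the rest of the host's BSS.
 */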

/*
 * We require that __hyp_bss_start and __bss_start are aligned, and enforce it
 * with an assertion. But the BSS_SECTION macro places an empty .sbss section
 * between them, which can in some cases cause the linker to misalign them. To
 * work around the issue, force a page alignment for __bss_start.
 */
#define SBSS_ALIGN			PAGE_SIZE
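/*
 * SBSS_ALIGN is consumed by the BSS_SECTION() invocation further down, where
 * it is used as the alignment applied just before __bss_start is defined; the
 * matching ASSERT(__hyp_bss_start == __bss_start) sits at the end of this
 * file.
 */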
#else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE
#define HYPERVISOR_DATA_SECTIONS
#define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
#define SBSS_ALIGN			0
#endif

#define RO_EXCEPTION_TABLE_ALIGN	4
#define RUNTIME_DISCARD_EXIT

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "image.h"

OUTPUT_ARCH(aarch64)
ENTRY(_text)

jiffies = jiffies_64;

#define HYPERVISOR_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_idmap_text_start = .;			\
	*(.hyp.idmap.text)				\
	__hyp_idmap_text_end = .;			\
	__hyp_text_start = .;				\
	*(.hyp.text)					\
	HYPERVISOR_EXTABLE				\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_text_end = .;

#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	__idmap_text_start = .;				\
	*(.idmap.text)					\
	__idmap_text_end = .;

#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT					\
	ALIGN_FUNCTION();				\
	__hibernate_exit_text_start = .;		\
	*(.hibernate_exit.text)				\
	__hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif

#ifdef CONFIG_KEXEC_CORE
#define KEXEC_TEXT					\
	ALIGN_FUNCTION();				\
	__relocate_new_kernel_start = .;		\
	*(.kexec_relocate.text)				\
	__relocate_new_kernel_end = .;
#else
#define KEXEC_TEXT
#endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_start = .;			\
	*(.entry.tramp.text)				\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_end = .;			\
	*(.entry.tramp.rodata)
#else
#define TRAMP_TEXT
#endif

#ifdef CONFIG_UNWIND_TABLES
#define UNWIND_DATA_SECTIONS				\
	.eh_frame : {					\
		__pi___eh_frame_start = .;		\
		*(.eh_frame)				\
		__pi___eh_frame_end = .;		\
	}
#else
#define UNWIND_DATA_SECTIONS
#endif

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from _stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. '_stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif
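
/*
 * The BYTE(0) in PECOFF_EDATA_PADDING keeps the section from being empty
 * (empty output sections may be dropped by the linker), so the ALIGN() that
 * follows it reliably pads _edata out to PECOFF_FILE_ALIGNMENT.
 */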

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 */
	DISCARDS
	/DISCARD/ : {
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash .gnu.hash)
	}
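
	/*
	 * The dynamic-linking sections discarded above are a by-product of
	 * linking the kernel as a position-independent executable (for
	 * relocation-based KASLR); the kernel applies its own relocations and
	 * never runs under a dynamic linker, so they are not needed at run
	 * time.
	 */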

	. = KIMAGE_VADDR;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : ALIGN(SEGMENT_ALIGN) {	/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			ENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			HYPERVISOR_TEXT
			*(.gnu.warning)
	}

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	/* everything from this point to __init_begin will be marked RO NX */
	RO_DATA(PAGE_SIZE)

	HYPERVISOR_DATA_SECTIONS

	.got : { *(.got) }
	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
	       "Unexpected GOT/PLT entries detected!")
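	/*
	 * 0x18 corresponds to the three reserved 8-byte entries that the
	 * linker places at the head of .got.plt for AArch64; anything beyond
	 * that would be a real lazy-binding PLT entry, which must never
	 * appear in the kernel image.
	 */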

	/* code sections that are never executed via the kernel mapping */
	.rodata.text : {
		TRAMP_TEXT
		HIBERNATE_TEXT
		KEXEC_TEXT
		IDMAP_TEXT
		. = ALIGN(PAGE_SIZE);
	}

	idmap_pg_dir = .;
	. += PAGE_SIZE;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

	reserved_pg_dir = .;
	. += PAGE_SIZE;

	swapper_pg_dir = .;
	. += PAGE_SIZE;

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;

	INIT_TEXT_SECTION(8)

	__exittext_begin = .;
	.exit.text : {
		EXIT_TEXT
	}
	__exittext_end = .;

	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	UNWIND_DATA_SECTIONS

	. = ALIGN(SEGMENT_ALIGN);
	__inittext_end = .;
	__initdata_begin = .;

	init_idmap_pg_dir = .;
	. += INIT_IDMAP_DIR_SIZE;
	init_idmap_pg_end = .;

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
		*(.init.altinstructions .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		EXIT_DATA
	}

	RUNTIME_CONST_VARIABLES

	PERCPU_SECTION(L1_CACHE_BYTES)
	HYPERVISOR_PERCPU_SECTION

	HYPERVISOR_RELOC_SECTION

	.rela.dyn : ALIGN(8) {
		__pi_rela_start = .;
		*(.rela .rela*)
		__pi_rela_end = .;
	}

	.relr.dyn : ALIGN(8) {
		__pi_relr_start = .;
		*(.relr.dyn)
		__pi_relr_end = .;
	}
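
	/*
	 * The __pi_ prefix exposes these markers to the early
	 * position-independent (pi) startup code, which walks the RELA and
	 * RELR tables itself to relocate the image before the main kernel
	 * starts running.
	 */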

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

	.data.rel.ro : { *(.data.rel.ro) }
	ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")

	_data = .;
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance in its own CWG area so the
	 * cache maintenance operations don't interfere with adjacent data.
	 */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}
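	/*
	 * SZ_2K matches the architectural maximum Cache Writeback Granule
	 * (512 words, i.e. 2 KiB), so this padding is sufficient on any
	 * implementation.
	 */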

	PECOFF_EDATA_PADDING
	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
	_edata = .;

	/* start of zero-init region */
	BSS_SECTION(SBSS_ALIGN, 0, 0)

	. = ALIGN(PAGE_SIZE);
	init_pg_dir = .;
	. += INIT_DIR_SIZE;
	init_pg_end = .;
	/* end of zero-init region */

	. += SZ_4K;		/* stack for the early C runtime */
	early_init_stack = .;
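	/*
	 * early_init_stack sits at the top of the 4 KiB reservation above:
	 * the stack grows downwards, so the early boot code can load this
	 * label straight into its stack pointer.
	 */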

	. = ALIGN(SEGMENT_ALIGN);
	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
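	/*
	 * __pecoff_data_rawsize above covers only the initialised data that
	 * is present in the file image (up to _edata), while __pecoff_data_size
	 * also includes the zero-initialised region; they correspond to the
	 * PE/COFF data section's SizeOfRawData and VirtualSize respectively.
	 */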
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	HEAD_SYMBOLS

	/*
	 * Sections that should stay zero sized; it is safer to check this
	 * explicitly than to discard them blindly.
	 */
	.plt : {
		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
}

#include "image-vars.h"

/*
 * The HYP init code and ID map text can't be longer than a page each. The
 * former is page-aligned, but the latter may not be with 16K or 64K pages, so
 * it should also not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE,
	"HYP init code too big")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
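/*
 * Rounding __idmap_text_start down to a 4 KiB boundary in the check above
 * means the ID map text must fit entirely within a single 4 KiB-aligned
 * window, i.e. it may never cross a 4 KiB boundary.
 */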
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K,
       "Hibernate exit text is bigger than 4 KiB")
ASSERT(__hibernate_exit_text_start == swsusp_arch_suspend_exit,
       "Hibernate exit text does not start with swsusp_arch_suspend_exit")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
	"Entry trampoline text too big")
#endif
#ifdef CONFIG_KVM
ASSERT(__hyp_bss_start == __bss_start, "HYP and Host BSS are misaligned")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")

ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
       "RESERVED_SWAPPER_OFFSET is wrong!")

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
       "TRAMP_SWAPPER_OFFSET is wrong!")
#endif

#ifdef CONFIG_KEXEC_CORE
/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K,
       "kexec relocation code is bigger than 4 KiB")
ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
ASSERT(__relocate_new_kernel_start == arm64_relocate_new_kernel,
       "kexec control page does not start with arm64_relocate_new_kernel")
#endif
396