/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif
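/*
 * PROVIDE32() emits the legacy 32-bit symbols (etext, edata, end, used
 * below) only on 32-bit builds; on 64-bit the names are mangled to
 * __unused__* so the generic symbols are not defined there.
 */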

#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	0
#define RUNTIME_DISCARD_EXIT
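/*
 * The defines above are hooks consumed by asm-generic/vmlinux.lds.h
 * (included below): place .bss.prominit at the start of .bss, emit a
 * PT_NOTE program header for the notes, keep the exception table in the
 * read-only data area with no extra alignment, and keep the .exit.*
 * sections at link time so they can be freed at runtime instead.
 */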

#define SOFT_MASK_TABLE(align)						\
	. = ALIGN(align);						\
	__soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) {	\
		__start___soft_mask_table = .;				\
		KEEP(*(__soft_mask_table))				\
		__stop___soft_mask_table = .;				\
	}

#define RESTART_TABLE(align)						\
	. = ALIGN(align);						\
	__restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) {	\
		__start___restart_table = .;				\
		KEEP(*(__restart_table))				\
		__stop___restart_table = .;				\
	}
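/*
 * Both tables above collect address-range entries emitted by the 64-bit
 * interrupt entry/exit code.  The __start___* and __stop___* symbols bound
 * them so the interrupt handling code can search the tables at runtime.
 */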

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

#define STRICT_ALIGN_SIZE	(1 << CONFIG_DATA_SHIFT)

#if STRICT_ALIGN_SIZE < PAGE_SIZE
#error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT"
#endif

ENTRY(_stext)

PHDRS {
	text PT_LOAD FLAGS(7); /* RWX */
	note PT_NOTE FLAGS(0);
}
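/*
 * Two program headers: all loadable sections go into "text" (marked RWX
 * here; page permissions are tightened later at runtime where the platform
 * supports it), and the ELF notes get their own PT_NOTE entry so tools can
 * locate them.
 */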

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
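/*
 * jiffies aliases the low 32 bits of jiffies_64: on 64-bit it is the whole
 * variable, on 32-bit (big-endian) the low word sits 4 bytes in, hence +4.
 */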
SECTIONS
{
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifndef CONFIG_PPC_BOOK3E_64
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :text

	__head_end = .;

#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
		. = . ;
#endif

#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.text);
#endif
		NOINSTR_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)
	} :text

	. = ALIGN(PAGE_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC32
	.sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
		*(.sdata2)
	}
#endif

	.data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
		*(.data.rel.ro .data.rel.ro.*)
	}

	.branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {
		*(.branch_lt)
	}

#ifdef CONFIG_PPC32
	.got1 : AT(ADDR(.got1) - LOAD_OFFSET) {
		*(.got1)
	}
	.got2 : AT(ADDR(.got2) - LOAD_OFFSET) {
		__got2_start = .;
		*(.got2)
		__got2_end = .;
	}
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		*(.got)
		*(.got.plt)
	}
	.plt : AT(ADDR(.plt) - LOAD_OFFSET) {
		/* XXX: is .plt (and .got.plt) required? */
		*(.plt)
	}

#else /* CONFIG_PPC32 */
	.toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) {
		*(.toc1)
	}

	.got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
		*(.got .toc)
	}
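	/*
	 * On 64-bit the small GOT and the TOC are merged into one region
	 * that kernel code addresses via the TOC pointer in r2.
	 */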

	SOFT_MASK_TABLE(8)
	RESTART_TABLE(8)

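	/*
	 * Under the ELFv1 ABI each function symbol refers to a descriptor
	 * (entry address, TOC pointer) rather than code; .opd collects those
	 * descriptors, bounded by __start_opd/__end_opd so the kernel can
	 * recognise and dereference them.
	 */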
#ifdef CONFIG_PPC64_ELF_ABI_V1
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		KEEP(*(.opd))
		__end_opd = .;
	}
#endif

	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
		__start___uaccess_flush_fixup = .;
		*(__uaccess_flush_fixup)
		__stop___uaccess_flush_fixup = .;
	}

	. = ALIGN(8);
	__entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
		__start___entry_flush_fixup = .;
		*(__entry_flush_fixup)
		__stop___entry_flush_fixup = .;
	}

	. = ALIGN(8);
	__scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
		__start___scv_entry_flush_fixup = .;
		*(__scv_entry_flush_fixup)
		__stop___scv_entry_flush_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_E500
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif
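	/*
	 * Each of the __*_fixup sections above is a table of patch sites,
	 * bounded by __start_... and __stop_... symbols.  Depending on the
	 * CPU and firmware-reported vulnerabilities, boot-time code either
	 * patches in the mitigation sequence or nops it out at each site.
	 */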

	/*
	 * Various code relies on __init_begin being at the strict RWX boundary.
	 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__srwx_boundary = .;
	__end_rodata = .;
	__init_begin = .;

/*
 * Init sections discarded at runtime
 */
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT

		/*
		 * .init.text might be RO so we must ensure this section ends on
		 * a page boundary.
		 */
		. = ALIGN(PAGE_SIZE);
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);
#endif
	} :text

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	. = ALIGN(PAGE_SIZE);

	INIT_DATA_SECTION(16)

	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif
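	/*
	 * CPU, MMU, lwsync and firmware feature sections above: tables of
	 * alternative-code patch entries applied during early boot (see
	 * arch/powerpc/lib/feature-fixups.c), again bounded by start/stop
	 * symbols.
	 */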

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
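	/*
	 * Machine descriptions registered with define_machine() are
	 * collected here; early boot walks this table to pick the matching
	 * platform.
	 */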
#ifdef CONFIG_RELOCATABLE
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
		__dynamic_symtab = .;
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
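	/*
	 * For relocatable kernels the dynamic sections and .rela.dyn
	 * relocations are kept in the image so early boot code can relocate
	 * the kernel when it is loaded at an address other than the one it
	 * was linked at.
	 */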
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
#ifdef CONFIG_PPC32
		*(SDATA_MAIN)
#endif
	}

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_ALIGN)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	BUG_TABLE

	. = ALIGN(PAGE_SIZE);
	_edata  =  .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS
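	/*
	 * Sections the kernel never uses at runtime: APU info notes,
	 * PLT/glink call stubs, ELF version and attribute data, and
	 * .eh_frame unwind tables.  Relocations are only discarded for
	 * non-relocatable kernels.
	 */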
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
#ifndef CONFIG_RELOCATABLE
		*(.rela*)
#endif
	}
}