/* xref: /linux/arch/powerpc/kernel/vmlinux.lds.S (revision 2dbc0838bcf24ca59cabc3130cf3b1d6809cdcd4) */
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * PROVIDE32(x): provide the legacy 32-bit-only symbol x (etext/edata/end)
 * on ppc32 builds.  On ppc64 the name is mangled with an __unused__ prefix
 * so PROVIDE() still has a symbol to define but nothing can bind to the
 * real name.
 */
#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif

/*
 * Hook consumed by the generic BSS_SECTION() macro in
 * asm-generic/vmlinux.lds.h: force .bss.prominit to be placed at the very
 * start of .bss (presumably early-boot/prom_init data — confirm against
 * the users of .bss.prominit).
 */
#define BSS_FIRST_SECTIONS *(.bss.prominit)

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

/*
 * Kconfig-controlled power-of-two alignments: STRICT_ALIGN_SIZE for the
 * start of the discardable init region (__init_begin below),
 * ETEXT_ALIGN_SIZE for the end-of-text marker (_etext below).
 */
#define STRICT_ALIGN_SIZE	(1 << CONFIG_DATA_SHIFT)
#define ETEXT_ALIGN_SIZE	(1 << CONFIG_ETEXT_SHIFT)
/* ELF entry point recorded in the header: start of kernel text. */
ENTRY(_stext)

/*
 * ELF program headers (segments).  Everything loadable goes into the
 * single RWX "kernel" PT_LOAD segment; note sections get their own
 * PT_NOTE segment; "dummy" exists only for the workaround below.
 */
PHDRS {
	kernel PT_LOAD FLAGS(7); /* RWX */
	notes PT_NOTE FLAGS(0);
	dummy PT_NOTE FLAGS(0);

	/* binutils < 2.18 has a bug that makes it misbehave when taking an
	   ELF file with all segments at load address 0 as input.  This
	   happens when running "strip" on vmlinux, because of the AT() magic
	   in this linker script.  People using GCC >= 4.2 won't run into
	   this problem, because the "build-id" support will put some data
	   into the "notes" segment (at a non-zero load address).

	   To work around this, we force some data into both the "dummy"
	   segment and the kernel segment, so the dummy segment will get a
	   non-zero load address.  It's not enough to always create the
	   "notes" segment, since if nothing gets assigned to it, its load
	   address will be zero.  */
}
38
#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
/* 64-bit loads/stores of jiffies can use the full 64-bit counter directly. */
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
/*
 * 32-bit: alias jiffies to the low-order word of jiffies_64.  The +4 byte
 * offset selects the least-significant 32 bits — NOTE(review): this
 * assumes a big-endian layout (ppc32 kernels are BE); confirm if an LE
 * ppc32 configuration ever appears.
 */
jiffies = jiffies_64 + 4;
#endif
/*
 * Output section layout.  Every allocatable section uses
 * AT(ADDR(sect) - LOAD_OFFSET) so the image links at the kernel virtual
 * base (KERNELBASE) while its physical load addresses start at zero.
 * Section ORDER here is load-bearing: fixed/head sections, text, RO data,
 * patch-site tables, discardable init, then read/write data and bss.
 */
SECTIONS
{
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		/* KEEP(): fixed-location vectors must survive --gc-sections. */
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
		/* Book3E has no fixed exception vectors here. */
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :kernel

	__head_end = .;

#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
		. = . ;
#endif

#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.text);
#endif
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
		/* Bracket .got2 so relocation code can find its bounds. */
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */

	} :kernel

	. = ALIGN(ETEXT_ALIGN_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC64
	/*
	 * Tables of code patch sites for the barrier/flush mitigations.
	 * Each table is bracketed by __start.../__stop... symbols so
	 * boot-time patching code can iterate its entries.
	 */
	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif
	EXCEPTION_TABLE(0)

	/* Note sections land in both the kernel and notes segments. */
	NOTES :kernel :notes

	/* The dummy segment contents for the bug workaround mentioned above
	   near PHDRS.  */
	.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
		LONG(0)
		LONG(0)
		LONG(0)
	} :kernel :dummy

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__init_begin = .;
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);
#endif
	} :kernel

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		INIT_SETUP(16)
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		INIT_CALLS
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		CON_INITCALL
	}

	/*
	 * CPU/MMU/firmware feature fixup tables, same bracketed-table
	 * pattern as the barrier fixups above.  KEEP() because nothing
	 * references the entries directly.
	 */
	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		INIT_RAM_FS
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
#ifdef CONFIG_RELOCATABLE
	/* Dynamic linking metadata needed to self-relocate the kernel. */
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
#ifdef CONFIG_PPC32
		__dynamic_symtab = .;
#endif
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
#ifdef CONFIG_UBSAN
		*(.data..Lubsan_data*)
		*(.data..Lubsan_type*)
#endif
		*(.data.rel*)
		*(SDATA_MAIN)
		*(.sdata2)
		*(.got.plt) *(.got)
		*(.plt)
		*(.branch_lt)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}

	/* ppc64 ELFv1 function descriptors, bracketed for boot code. */
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		KEEP(*(.opd))
		__end_opd = .;
	}

	/*
	 * TOC/GOT.  prom_init.o's TOC entries are grouped and bracketed
	 * separately (non-relocatable builds) — presumably so they can be
	 * handled specially during early boot; confirm against prom_init
	 * callers.
	 */
	. = ALIGN(256);
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
#ifndef CONFIG_RELOCATABLE
		__prom_init_toc_start = .;
		arch/powerpc/kernel/prom_init.o*(.toc .got)
		__prom_init_toc_end = .;
#endif
		*(.got)
		*(.toc)
	}
#endif

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_SIZE)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	BUG_TABLE

	. = ALIGN(PAGE_SIZE);
	_edata  =  .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	STABS_DEBUG

	DWARF_DEBUG

	/* Sections deliberately dropped from the final image. */
	DISCARDS
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt .rela* .comment)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
	}
}
397