#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).  See the usage sketch after the MEM_*
 * definitions below.
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
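
/*
 * Usage sketch (illustrative; not taken from any particular driver or
 * arch linker script): with CONFIG_HOTPLUG=y, DEV_KEEP(init.data) expands
 * to the input-section pattern *(.devinit.data), so objects placed there,
 * for example via
 *
 *	static int probe_delay __devinitdata = 10;
 *
 * stay in the final image.  With CONFIG_HOTPLUG=n the same pattern is
 * emitted by DEV_DISCARD(init.data) instead, so the arch vmlinux.lds.S
 * can route it to /DISCARD/.  CPU_KEEP/CPU_DISCARD and MEM_KEEP/MEM_DISCARD
 * do the same for the .cpuinit and .meminit section families.
 */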

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
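
/*
 * The __start_mcount_loc/__stop_mcount_loc pair is consumed from C as the
 * bounds of an array of call-site addresses recorded at build time.  A
 * minimal sketch of that pattern (the helper below is hypothetical; only
 * the linker-provided symbols are real):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static int __init scan_mcount_records(void)
 *	{
 *		unsigned long *addr;
 *
 *		for (addr = __start_mcount_loc; addr < __stop_mcount_loc; addr++)
 *			record_mcount_callsite(*addr);	// hypothetical helper
 *		return 0;
 *	}
 */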

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.data.init.refok)						\
	*(.ref.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___markers) = .;				\
	*(__markers)							\
	VMLINUX_SYMBOL(__stop___markers) = .;				\
	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
	*(__tracepoints)						\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;

#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MCOUNT_REC()						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
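
/*
 * Typical use from an architecture's vmlinux.lds.S (a sketch, not copied
 * from any particular arch; the alignment argument is arch-specific):
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * RO_DATA() aligns . to the requested boundary at both ends and brackets
 * the whole region with __start_rodata/__end_rodata, so the caller only
 * has to pick the alignment (or use the legacy RODATA form above).
 */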

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
		*(.text.init.refok)					\
		*(.exit.text.refok)					\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we get the same
 * addresses even on the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * addresses even on the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
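
/*
 * An architecture's vmlinux.lds.S typically composes its .text output
 * section from the helpers above, roughly like this (a sketch only; the
 * extra input sections and the fill byte vary per architecture):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		*(.fixup)
 *		*(.gnu.warning)
 *	} = 0x9090
 */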

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.data)						\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.data)						\
	MEM_DISCARD(init.rodata)					\
	/* implement dynamic printk debug */				\
	VMLINUX_SYMBOL(__start___verbose_strings) = .;			\
	*(__verbose_strings)						\
	VMLINUX_SYMBOL(__stop___verbose_strings) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)

		/* DWARF debug sections.
		 * Symbols in the DWARF debugging sections are relative to
		 * the beginning of the section, so we begin them at 0.  */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif
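
/*
 * lib/bug.c walks this range as an array of struct bug_entry records when
 * decoding BUG()/WARN() traps.  The access pattern looks roughly like the
 * sketch below (declarations only; the lookup logic lives in lib/bug.c):
 *
 *	extern const struct bug_entry __start___bug_table[];
 *	extern const struct bug_entry __stop___bug_table[];
 *
 * i.e. scan [__start___bug_table, __stop___bug_table) for the entry whose
 * recorded address matches the trapping instruction.
 */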

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)
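
/*
 * INITCALLS is intended to be wrapped by the architecture linker script,
 * which supplies the begin/end markers that init/main.c walks at boot.
 * A representative sketch (details vary per arch):
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		VMLINUX_SYMBOL(__initcall_start) = .;
 *		INITCALLS
 *		VMLINUX_SYMBOL(__initcall_end) = .;
 *	}
 */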

#define PERCPU(align)							\
	. = ALIGN(align);						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
	}								\
	VMLINUX_SYMBOL(__per_cpu_end) = .;
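
/*
 * PERCPU() is likewise invoked from the arch linker script, e.g. (sketch;
 * the alignment is arch-specific):
 *
 *	PERCPU(PAGE_SIZE)
 *
 * The resulting __per_cpu_start/__per_cpu_end bounds describe the template
 * .data.percpu image that setup_per_cpu_areas() copies once per possible
 * CPU at boot.
 */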