xref: /linux/include/asm-generic/vmlinux.lds.h (revision e27ecdd94d81e5bc3d1f68591701db5adb342f0d)
1 /*
2  * Helper macros to support writing architecture specific
3  * linker scripts.
4  *
5  * A minimal linker script has the following content:
6  * [This is a sample; architectures may have special requirements]
7  *
8  * OUTPUT_FORMAT(...)
9  * OUTPUT_ARCH(...)
10  * ENTRY(...)
11  * SECTIONS
12  * {
13  *	. = START;
14  *	__init_begin = .;
15  *	HEAD_TEXT_SECTION
16  *	INIT_TEXT_SECTION(PAGE_SIZE)
17  *	INIT_DATA_SECTION(...)
18  *	PERCPU(PAGE_SIZE)
19  *	__init_end = .;
20  *
21  *	_stext = .;
22  *	TEXT_SECTION = 0
23  *	_etext = .;
24  *
25  *      _sdata = .;
26  *	RO_DATA_SECTION(PAGE_SIZE)
27  *	RW_DATA_SECTION(...)
28  *	_edata = .;
29  *
30  *	EXCEPTION_TABLE(...)
31  *	NOTES
32  *
33  *	__bss_start = .;
34  *	BSS_SECTION(0, 0)
35  *	__bss_stop = .;
36  *	_end = .;
37  *
38  *	/DISCARD/ : {
39  *		EXIT_TEXT
40  *		EXIT_DATA
41  *		EXIT_CALL
42  *	}
43  *	STABS_DEBUG
44  *	DWARF_DEBUG
45  * }
46  *
47  * [__init_begin, __init_end] is the init section that may be freed after init
48  * [_stext, _etext] is the text section
49  * [_sdata, _edata] is the data section
50  *
51  * Some of the included output sections have their own set of constants.
52  * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
53  *               [__nosave_begin, __nosave_end] for the nosave data
54  */
55 
56 #ifndef LOAD_OFFSET
57 #define LOAD_OFFSET 0
58 #endif
59 
60 #ifndef VMLINUX_SYMBOL
61 #define VMLINUX_SYMBOL(_sym_) _sym_
62 #endif
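/*
 * Example (a sketch, not taken from this file): an architecture whose C
 * symbols carry a leading underscore at the assembly level would override
 * this before including the header from its vmlinux.lds.S, along the
 * lines of:
 *
 *	#define VMLINUX_SYMBOL(_sym_) _##_sym_
 *	#include <asm-generic/vmlinux.lds.h>
 *
 * so that a symbol spelled "jiffies" in C is referenced as "_jiffies"
 * in the generated linker script.
 */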
63 
64 /* Align . to an 8 byte boundary, which equals the maximum function alignment. */
65 #define ALIGN_FUNCTION()  . = ALIGN(8)
66 
67 /* The actual configuration determines whether the init/exit sections
68  * are handled as text/data or whether they can be discarded (which
69  * often happens at runtime).
70  */
71 #ifdef CONFIG_HOTPLUG
72 #define DEV_KEEP(sec)    *(.dev##sec)
73 #define DEV_DISCARD(sec)
74 #else
75 #define DEV_KEEP(sec)
76 #define DEV_DISCARD(sec) *(.dev##sec)
77 #endif
78 
79 #ifdef CONFIG_HOTPLUG_CPU
80 #define CPU_KEEP(sec)    *(.cpu##sec)
81 #define CPU_DISCARD(sec)
82 #else
83 #define CPU_KEEP(sec)
84 #define CPU_DISCARD(sec) *(.cpu##sec)
85 #endif
86 
87 #if defined(CONFIG_MEMORY_HOTPLUG)
88 #define MEM_KEEP(sec)    *(.mem##sec)
89 #define MEM_DISCARD(sec)
90 #else
91 #define MEM_KEEP(sec)
92 #define MEM_DISCARD(sec) *(.mem##sec)
93 #endif
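/*
 * Illustration (modelled on <linux/init.h> of this era; treat as a
 * sketch): annotations such as __devinitdata, __cpuinitdata and
 * __meminitdata place objects into the matching input sections, e.g.
 *
 *	static struct resource extra_res __devinitdata = { ... };
 *
 * ends up in .devinit.data; with CONFIG_HOTPLUG=y the DEV_KEEP()
 * reference in DATA_DATA keeps it in .data, otherwise the DEV_DISCARD()
 * reference in INIT_DATA groups it with the init data that is freed or
 * discarded after boot.
 */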
94 
95 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
96 #define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
97 			*(__mcount_loc)				\
98 			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
99 #else
100 #define MCOUNT_REC()
101 #endif
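/*
 * The __start_mcount_loc/__stop_mcount_loc pair brackets the table of
 * mcount call sites generated at build time (scripts/recordmcount.pl).
 * A sketch of how ftrace consumes it; register_mcount_call_site() is a
 * hypothetical stand-in for the real ftrace initialization:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static void __init record_sites(void)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *			register_mcount_call_site(*p);
 *	}
 */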
102 
103 #ifdef CONFIG_TRACE_BRANCH_PROFILING
104 #define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
105 				*(_ftrace_annotated_branch)			      \
106 				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
107 #else
108 #define LIKELY_PROFILE()
109 #endif
110 
111 #ifdef CONFIG_PROFILE_ALL_BRANCHES
112 #define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
113 				*(_ftrace_branch)			      \
114 				VMLINUX_SYMBOL(__stop_branch_profile) = .;
115 #else
116 #define BRANCH_PROFILE()
117 #endif
118 
119 #ifdef CONFIG_EVENT_TRACING
120 #define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
121 			*(_ftrace_events)				\
122 			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
123 #else
124 #define FTRACE_EVENTS()
125 #endif
126 
127 #ifdef CONFIG_TRACING
128 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
129 			 *(__trace_printk_fmt) /* trace_printk fmt pointers */ \
130 			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
131 #else
132 #define TRACE_PRINTKS()
133 #endif
134 
135 #ifdef CONFIG_FTRACE_SYSCALLS
136 #define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
137 			 *(__syscalls_metadata)				\
138 			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
139 #else
140 #define TRACE_SYSCALLS()
141 #endif
142 
143 /* .data section */
144 #define DATA_DATA							\
145 	*(.data)							\
146 	*(.ref.data)							\
147 	DEV_KEEP(init.data)						\
148 	DEV_KEEP(exit.data)						\
149 	CPU_KEEP(init.data)						\
150 	CPU_KEEP(exit.data)						\
151 	MEM_KEEP(init.data)						\
152 	MEM_KEEP(exit.data)						\
153 	. = ALIGN(8);							\
154 	VMLINUX_SYMBOL(__start___markers) = .;				\
155 	*(__markers)							\
156 	VMLINUX_SYMBOL(__stop___markers) = .;				\
157 	. = ALIGN(32);							\
158 	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
159 	*(__tracepoints)						\
160 	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
161 	/* implement dynamic printk debug */				\
162 	. = ALIGN(8);							\
163 	VMLINUX_SYMBOL(__start___verbose) = .;                          \
164 	*(__verbose)                                                    \
165 	VMLINUX_SYMBOL(__stop___verbose) = .;				\
166 	LIKELY_PROFILE()		       				\
167 	BRANCH_PROFILE()						\
168 	TRACE_PRINTKS()							\
169 	FTRACE_EVENTS()							\
170 	TRACE_SYSCALLS()
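/*
 * The ALIGN(32) above matches the alignment that the tracepoint
 * definitions request for themselves.  A sketch of the producer side
 * (modelled on <linux/tracepoint.h> of this era; details are
 * illustrative only):
 *
 *	struct tracepoint __tracepoint_sample
 *	__attribute__((section("__tracepoints"), aligned(32))) =
 *		{ .name = "sample", .state = 0 };
 *
 * The tracepoint core then walks [__start___tracepoints,
 * __stop___tracepoints) to connect probes.
 */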
171 
172 /*
173  * Data section helpers
174  */
175 #define NOSAVE_DATA							\
176 	. = ALIGN(PAGE_SIZE);						\
177 	VMLINUX_SYMBOL(__nosave_begin) = .;				\
178 	*(.data.nosave)							\
179 	. = ALIGN(PAGE_SIZE);						\
180 	VMLINUX_SYMBOL(__nosave_end) = .;
181 
182 #define PAGE_ALIGNED_DATA(page_align)					\
183 	. = ALIGN(page_align);						\
184 	*(.data.page_aligned)
185 
186 #define READ_MOSTLY_DATA(align)						\
187 	. = ALIGN(align);						\
188 	*(.data.read_mostly)
189 
190 #define CACHELINE_ALIGNED_DATA(align)					\
191 	. = ALIGN(align);						\
192 	*(.data.cacheline_aligned)
193 
194 #define INIT_TASK(align)						\
195 	. = ALIGN(align);						\
196 	*(.data.init_task)
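/*
 * These helpers collect data that C code tags with per-section
 * attributes.  Roughly (annotation names as used around this kernel
 * version; treat as a sketch):
 *
 *	static int pm_buffer[16] __nosavedata;		// -> .data.nosave
 *	static int hot_counter __read_mostly;		// -> .data.read_mostly
 *	static struct foo f __cacheline_aligned;	// -> .data.cacheline_aligned
 */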
197 
198 /*
199  * Read only Data
200  */
201 #define RO_DATA_SECTION(align)						\
202 	. = ALIGN((align));						\
203 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
204 		VMLINUX_SYMBOL(__start_rodata) = .;			\
205 		*(.rodata) *(.rodata.*)					\
206 		*(__vermagic)		/* Kernel version magic */	\
207 		*(__markers_strings)	/* Markers: strings */		\
208 		*(__tracepoints_strings)/* Tracepoints: strings */	\
209 	}								\
210 									\
211 	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
212 		*(.rodata1)						\
213 	}								\
214 									\
215 	BUG_TABLE							\
216 									\
217 	/* PCI quirks */						\
218 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
219 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
220 		*(.pci_fixup_early)					\
221 		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
222 		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
223 		*(.pci_fixup_header)					\
224 		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
225 		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
226 		*(.pci_fixup_final)					\
227 		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
228 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
229 		*(.pci_fixup_enable)					\
230 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
231 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
232 		*(.pci_fixup_resume)					\
233 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
234 		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
235 		*(.pci_fixup_resume_early)				\
236 		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
237 		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
238 		*(.pci_fixup_suspend)					\
239 		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
240 	}								\
241 									\
242 	/* Built-in firmware blobs */					\
243 	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
244 		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
245 		*(.builtin_fw)						\
246 		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
247 	}								\
248 									\
249 	/* RapidIO route ops */						\
250 	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
251 		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
252 		*(.rio_route_ops)					\
253 		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
254 	}								\
255 									\
256 	TRACEDATA							\
257 									\
258 	/* Kernel symbol table: Normal symbols */			\
259 	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
260 		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
261 		*(__ksymtab)						\
262 		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
263 	}								\
264 									\
265 	/* Kernel symbol table: GPL-only symbols */			\
266 	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
267 		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
268 		*(__ksymtab_gpl)					\
269 		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
270 	}								\
271 									\
272 	/* Kernel symbol table: Normal unused symbols */		\
273 	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
274 		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
275 		*(__ksymtab_unused)					\
276 		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
277 	}								\
278 									\
279 	/* Kernel symbol table: GPL-only unused symbols */		\
280 	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
281 		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
282 		*(__ksymtab_unused_gpl)					\
283 		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
284 	}								\
285 									\
286 	/* Kernel symbol table: GPL-future-only symbols */		\
287 	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
288 		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
289 		*(__ksymtab_gpl_future)					\
290 		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
291 	}								\
292 									\
293 	/* Kernel symbol table: Normal symbols */			\
294 	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
295 		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
296 		*(__kcrctab)						\
297 		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
298 	}								\
299 									\
300 	/* Kernel symbol table: GPL-only symbols */			\
301 	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
302 		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
303 		*(__kcrctab_gpl)					\
304 		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
305 	}								\
306 									\
307 	/* Kernel symbol table: Normal unused symbols */		\
308 	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
309 		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
310 		*(__kcrctab_unused)					\
311 		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
312 	}								\
313 									\
314 	/* Kernel symbol table: GPL-only unused symbols */		\
315 	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
316 		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
317 		*(__kcrctab_unused_gpl)					\
318 		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
319 	}								\
320 									\
321 	/* Kernel symbol table: GPL-future-only symbols */		\
322 	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
323 		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
324 		*(__kcrctab_gpl_future)					\
325 		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
326 	}								\
327 									\
328 	/* Kernel symbol table: strings */				\
329         __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
330 		*(__ksymtab_strings)					\
331 	}								\
332 									\
333 	/* __*init sections */						\
334 	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
335 		*(.ref.rodata)						\
336 		MCOUNT_REC()						\
337 		DEV_KEEP(init.rodata)					\
338 		DEV_KEEP(exit.rodata)					\
339 		CPU_KEEP(init.rodata)					\
340 		CPU_KEEP(exit.rodata)					\
341 		MEM_KEEP(init.rodata)					\
342 		MEM_KEEP(exit.rodata)					\
343 	}								\
344 									\
345 	/* Built-in module parameters. */				\
346 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
347 		VMLINUX_SYMBOL(__start___param) = .;			\
348 		*(__param)						\
349 		VMLINUX_SYMBOL(__stop___param) = .;			\
350 		. = ALIGN((align));					\
351 		VMLINUX_SYMBOL(__end_rodata) = .;			\
352 	}								\
353 	. = ALIGN((align));
354 
355 /* RODATA & RO_DATA provided for backward compatibility.
356  * All archs are supposed to use RO_DATA() */
357 #define RODATA          RO_DATA_SECTION(4096)
358 #define RO_DATA(align)  RO_DATA_SECTION(align)
359 
360 #define SECURITY_INIT							\
361 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
362 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
363 		*(.security_initcall.init) 				\
364 		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
365 	}
366 
367 /* .text section. Map to function alignment to avoid address changes
368  * during the second ld pass when generating System.map */
369 #define TEXT_TEXT							\
370 		ALIGN_FUNCTION();					\
371 		*(.text.hot)						\
372 		*(.text)						\
373 		*(.ref.text)						\
374 	DEV_KEEP(init.text)						\
375 	DEV_KEEP(exit.text)						\
376 	CPU_KEEP(init.text)						\
377 	CPU_KEEP(exit.text)						\
378 	MEM_KEEP(init.text)						\
379 	MEM_KEEP(exit.text)						\
380 		*(.text.unlikely)
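/*
 * .ref.text collects code that is explicitly allowed to reference init
 * or exit sections without triggering modpost warnings.  Sketch of the
 * producer side (the __ref annotation comes from <linux/init.h>; the
 * function and callee names here are hypothetical):
 *
 *	static int __ref add_memory_block(int nid)	// placed in .ref.text
 *	{
 *		return arch_add_some_memory(nid);	// may live in .init.text
 *	}
 */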
381 
382 
383 /* sched.text is aligned to function alignment to ensure we get the same
384  * address even at the second ld pass when generating System.map */
385 #define SCHED_TEXT							\
386 		ALIGN_FUNCTION();					\
387 		VMLINUX_SYMBOL(__sched_text_start) = .;			\
388 		*(.sched.text)						\
389 		VMLINUX_SYMBOL(__sched_text_end) = .;
390 
391 /* spinlock.text is aligned to function alignment to ensure we get the same
392  * address even at the second ld pass when generating System.map */
393 #define LOCK_TEXT							\
394 		ALIGN_FUNCTION();					\
395 		VMLINUX_SYMBOL(__lock_text_start) = .;			\
396 		*(.spinlock.text)					\
397 		VMLINUX_SYMBOL(__lock_text_end) = .;
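/*
 * Functions annotated __sched and __lockfunc are what end up between
 * these start/end markers; the kernel uses the markers to recognize such
 * frames, e.g. in_sched_functions() and in_lock_functions() boil down to
 * an address range check.  Simplified sketch:
 *
 *	int in_sched_functions(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)__sched_text_start &&
 *		       addr <  (unsigned long)__sched_text_end;
 *	}
 */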
398 
399 #define KPROBES_TEXT							\
400 		ALIGN_FUNCTION();					\
401 		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
402 		*(.kprobes.text)					\
403 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
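/*
 * Code marked __kprobes is gathered here so the kprobes core can refuse
 * to place probes on its own implementation.  Simplified sketch of the
 * range check (modelled loosely on the registration path):
 *
 *	if (addr >= (unsigned long)__kprobes_text_start &&
 *	    addr <  (unsigned long)__kprobes_text_end)
 *		return -EINVAL;		// refuse to probe kprobes code
 */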
404 
405 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
406 #define IRQENTRY_TEXT							\
407 		ALIGN_FUNCTION();					\
408 		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
409 		*(.irqentry.text)					\
410 		VMLINUX_SYMBOL(__irqentry_text_end) = .;
411 #else
412 #define IRQENTRY_TEXT
413 #endif
414 
415 /* Section used for early init (in .S files) */
416 #define HEAD_TEXT  *(.head.text)
417 
418 #define HEAD_TEXT_SECTION							\
419 	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
420 		HEAD_TEXT						\
421 	}
422 
423 /*
424  * Exception table
425  */
426 #define EXCEPTION_TABLE(align)						\
427 	. = ALIGN(align);						\
428 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
429 		VMLINUX_SYMBOL(__start___ex_table) = .;			\
430 		*(__ex_table)						\
431 		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
432 	}
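/*
 * On many architectures an exception table entry is simply a pair of
 * addresses, and the fault handler searches the sorted table between the
 * two markers.  A sketch (assumes the common two-word layout; some
 * architectures differ):
 *
 *	struct exception_table_entry { unsigned long insn, fixup; };
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	e = search_extable(__start___ex_table, __stop___ex_table - 1, addr);
 *	if (e)
 *		regs->ip = e->fixup;	// resume at the fixup code (x86-style)
 */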
433 
434 /*
435  * Init task
436  */
437 #define INIT_TASK_DATA(align)						\
438 	. = ALIGN(align);						\
439 	.data.init_task : {						\
440 		INIT_TASK(align)					\
441 	}
442 
443 /* init and exit section handling */
444 #define INIT_DATA							\
445 	*(.init.data)							\
446 	DEV_DISCARD(init.data)						\
447 	CPU_DISCARD(init.data)						\
448 	MEM_DISCARD(init.data)						\
449 	*(.init.rodata)							\
450 	DEV_DISCARD(init.rodata)					\
451 	CPU_DISCARD(init.rodata)					\
452 	MEM_DISCARD(init.rodata)
453 
454 #define INIT_TEXT							\
455 	*(.init.text)							\
456 	DEV_DISCARD(init.text)						\
457 	CPU_DISCARD(init.text)						\
458 	MEM_DISCARD(init.text)
459 
460 #define EXIT_DATA							\
461 	*(.exit.data)							\
462 	DEV_DISCARD(exit.data)						\
463 	DEV_DISCARD(exit.rodata)					\
464 	CPU_DISCARD(exit.data)						\
465 	CPU_DISCARD(exit.rodata)					\
466 	MEM_DISCARD(exit.data)						\
467 	MEM_DISCARD(exit.rodata)
468 
469 #define EXIT_TEXT							\
470 	*(.exit.text)							\
471 	DEV_DISCARD(exit.text)						\
472 	CPU_DISCARD(exit.text)						\
473 	MEM_DISCARD(exit.text)
474 
475 #define EXIT_CALL							\
476 	*(.exitcall.exit)
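/*
 * .exitcall.exit holds the pointers generated by __exitcall()/module_exit()
 * for built-in code.  Since built-in code is never unloaded, architectures
 * normally list EXIT_CALL under /DISCARD/ (as in the sample script at the
 * top of this file).  Producer side, roughly as in <linux/init.h>:
 *
 *	#define __exit_call	__used __section(.exitcall.exit)
 *	#define __exitcall(fn)						\
 *		static exitcall_t __exitcall_##fn __exit_call = fn
 */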
477 
478 /*
479  * bss (Block Started by Symbol) - uninitialized data
480  * zeroed during startup
481  */
482 #define SBSS								\
483 	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
484 		*(.sbss)						\
485 		*(.scommon)						\
486 	}
487 
488 #define BSS(bss_align)							\
489 	. = ALIGN(bss_align);						\
490 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
491 		VMLINUX_SYMBOL(__bss_start) = .;			\
492 		*(.bss.page_aligned)					\
493 		*(.dynbss)						\
494 		*(.bss)							\
495 		*(COMMON)						\
496 		VMLINUX_SYMBOL(__bss_stop) = .;				\
497 	}
498 
499 /*
500  * DWARF debug sections.
501  * Symbols in the DWARF debugging sections are relative to
502  * the beginning of the section so we begin them at 0.
503  */
504 #define DWARF_DEBUG							\
505 		/* DWARF 1 */						\
506 		.debug          0 : { *(.debug) }			\
507 		.line           0 : { *(.line) }			\
508 		/* GNU DWARF 1 extensions */				\
509 		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
510 		.debug_sfnames  0 : { *(.debug_sfnames) }		\
511 		/* DWARF 1.1 and DWARF 2 */				\
512 		.debug_aranges  0 : { *(.debug_aranges) }		\
513 		.debug_pubnames 0 : { *(.debug_pubnames) }		\
514 		/* DWARF 2 */						\
515 		.debug_info     0 : { *(.debug_info			\
516 				.gnu.linkonce.wi.*) }			\
517 		.debug_abbrev   0 : { *(.debug_abbrev) }		\
518 		.debug_line     0 : { *(.debug_line) }			\
519 		.debug_frame    0 : { *(.debug_frame) }			\
520 		.debug_str      0 : { *(.debug_str) }			\
521 		.debug_loc      0 : { *(.debug_loc) }			\
522 		.debug_macinfo  0 : { *(.debug_macinfo) }		\
523 		/* SGI/MIPS DWARF 2 extensions */			\
524 		.debug_weaknames 0 : { *(.debug_weaknames) }		\
525 		.debug_funcnames 0 : { *(.debug_funcnames) }		\
526 		.debug_typenames 0 : { *(.debug_typenames) }		\
527 		.debug_varnames  0 : { *(.debug_varnames) }		\
528 
529 		/* Stabs debugging sections.  */
530 #define STABS_DEBUG							\
531 		.stab 0 : { *(.stab) }					\
532 		.stabstr 0 : { *(.stabstr) }				\
533 		.stab.excl 0 : { *(.stab.excl) }			\
534 		.stab.exclstr 0 : { *(.stab.exclstr) }			\
535 		.stab.index 0 : { *(.stab.index) }			\
536 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
537 		.comment 0 : { *(.comment) }
538 
539 #ifdef CONFIG_GENERIC_BUG
540 #define BUG_TABLE							\
541 	. = ALIGN(8);							\
542 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
543 		VMLINUX_SYMBOL(__start___bug_table) = .;		\
544 		*(__bug_table)						\
545 		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
546 	}
547 #else
548 #define BUG_TABLE
549 #endif
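/*
 * Each BUG() site (and, in some configurations, WARN()) emits one
 * struct bug_entry into __bug_table; lib/bug.c scans
 * [__start___bug_table, __stop___bug_table) to find the entry matching a
 * faulting address.  Sketch of a typical entry layout (see
 * include/asm-generic/bug.h for the real definition):
 *
 *	struct bug_entry {
 *		unsigned long	bug_addr;
 *		const char	*file;		// CONFIG_DEBUG_BUGVERBOSE only
 *		unsigned short	line;		// CONFIG_DEBUG_BUGVERBOSE only
 *		unsigned short	flags;
 *	};
 */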
550 
551 #ifdef CONFIG_PM_TRACE
552 #define TRACEDATA							\
553 	. = ALIGN(4);							\
554 	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
555 		VMLINUX_SYMBOL(__tracedata_start) = .;			\
556 		*(.tracedata)						\
557 		VMLINUX_SYMBOL(__tracedata_end) = .;			\
558 	}
559 #else
560 #define TRACEDATA
561 #endif
562 
563 #define NOTES								\
564 	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
565 		VMLINUX_SYMBOL(__start_notes) = .;			\
566 		*(.note.*)						\
567 		VMLINUX_SYMBOL(__stop_notes) = .;			\
568 	}
569 
570 #define INIT_SETUP(initsetup_align)					\
571 		. = ALIGN(initsetup_align);				\
572 		VMLINUX_SYMBOL(__setup_start) = .;			\
573 		*(.init.setup)						\
574 		VMLINUX_SYMBOL(__setup_end) = .;
575 
576 #define INITCALLS							\
577 	*(.initcallearly.init)						\
578 	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
579   	*(.initcall0.init)						\
580   	*(.initcall0s.init)						\
581   	*(.initcall1.init)						\
582   	*(.initcall1s.init)						\
583   	*(.initcall2.init)						\
584   	*(.initcall2s.init)						\
585   	*(.initcall3.init)						\
586   	*(.initcall3s.init)						\
587   	*(.initcall4.init)						\
588   	*(.initcall4s.init)						\
589   	*(.initcall5.init)						\
590   	*(.initcall5s.init)						\
591 	*(.initcallrootfs.init)						\
592   	*(.initcall6.init)						\
593   	*(.initcall6s.init)						\
594   	*(.initcall7.init)						\
595   	*(.initcall7s.init)
596 
597 #define INIT_CALLS							\
598 		VMLINUX_SYMBOL(__initcall_start) = .;			\
599 		INITCALLS						\
600 		VMLINUX_SYMBOL(__initcall_end) = .;
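/*
 * The .initcall*.init sections are populated by the initcall macros in
 * <linux/init.h>; the level encoded in the section name is what produces
 * the ordering above.  Roughly:
 *
 *	#define __define_initcall(level,fn,id) \
 *		static initcall_t __initcall_##fn##id __used \
 *		__attribute__((__section__(".initcall" level ".init"))) = fn
 *	#define device_initcall(fn)	__define_initcall("6",fn,6)
 *
 * At boot, init/main.c walks the pointers between __initcall_start and
 * __initcall_end (do_pre_smp_initcalls()/do_initcalls()) and calls each
 * one in turn.
 */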
601 
602 #define CON_INITCALL							\
603 		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
604 		*(.con_initcall.init)					\
605 		VMLINUX_SYMBOL(__con_initcall_end) = .;
606 
607 #define SECURITY_INITCALL						\
608 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
609 		*(.security_initcall.init)				\
610 		VMLINUX_SYMBOL(__security_initcall_end) = .;
611 
612 #ifdef CONFIG_BLK_DEV_INITRD
613 #define INIT_RAM_FS							\
614 	. = ALIGN(PAGE_SIZE);						\
615 	VMLINUX_SYMBOL(__initramfs_start) = .;				\
616 	*(.init.ramfs)							\
617 	VMLINUX_SYMBOL(__initramfs_end) = .;
618 #else
619 #define INIT_RAM_FS
620 #endif
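/*
 * The .init.ramfs payload is the built-in cpio archive generated under
 * usr/; it is pulled in with something along the lines of:
 *
 *	.section .init.ramfs,"a"
 *	.incbin "usr/initramfs_data.cpio.gz"
 *
 * and init/initramfs.c later unpacks whatever it finds between
 * __initramfs_start and __initramfs_end into the rootfs.
 */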
621 
622 /**
623  * PERCPU_VADDR - define output section for percpu area
624  * @vaddr: explicit base address (optional)
625  * @phdr: destination PHDR (optional)
626  *
627  * Macro which expands to output section for percpu area.  If @vaddr
628  * is not blank, it specifies explicit base address and all percpu
629  * symbols will be offset from the given address.  If blank, @vaddr
630  * always equals @laddr + LOAD_OFFSET.
631  *
632  * @phdr defines the output PHDR to use if not blank.  Be warned that
633  * output PHDR is sticky.  If @phdr is specified, the next output
634  * section in the linker script will go there too.  @phdr should have
635  * a leading colon.
636  *
637  * Note that this macro defines __per_cpu_load as an absolute symbol.
638  * If there is no need to put the percpu section at a predetermined
639  * address, use PERCPU().
640  */
641 #define PERCPU_VADDR(vaddr, phdr)					\
642 	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
643 	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
644 				- LOAD_OFFSET) {			\
645 		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
646 		*(.data.percpu.first)					\
647 		*(.data.percpu.page_aligned)				\
648 		*(.data.percpu)						\
649 		*(.data.percpu.shared_aligned)				\
650 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
651 	} phdr								\
652 	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
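/*
 * Example (a sketch, close to how x86_64 used this at the time): place
 * the percpu area at virtual address 0 in its own program header so each
 * CPU's copy can be addressed relative to a segment base:
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * with ":percpu" previously declared in the PHDRS command of the
 * architecture's linker script.
 */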
653 
654 /**
655  * PERCPU - define output section for percpu area, simple version
656  * @align: required alignment
657  *
658  * Aligns to @align and outputs the output section for the percpu area.
659  * This macro doesn't manipulate @vaddr or @phdr; __per_cpu_load and
660  * __per_cpu_start will be identical.
661  *
662  * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
663  * that __per_cpu_load is defined as a relative symbol against
664  * .data.percpu which is required for relocatable x86_32
665  * configuration.
666  */
667 #define PERCPU(align)							\
668 	. = ALIGN(align);						\
669 	.data.percpu	: AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
670 		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
671 		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
672 		*(.data.percpu.first)					\
673 		*(.data.percpu.page_aligned)				\
674 		*(.data.percpu)						\
675 		*(.data.percpu.shared_aligned)				\
676 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
677 	}
678 
679 
680 /*
681  * Definition of the high level *_SECTION macros
682  * They will fit only a subset of the architectures
683  */
684 
685 
686 /*
687  * Writeable data.
688  * All sections are combined in a single .data section.
689  * The sections following CONSTRUCTORS are arranged so their
690  * typical alignment matches.
691  * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
692  * the sections that have this restriction (or a similar one)
693  * are located before the ones requiring PAGE_SIZE alignment.
694  * NOSAVE_DATA starts and ends with PAGE_SIZE alignment, which
695  * matches the requirement of PAGE_ALIGNED_DATA.
696  *
697  * Use 0 as pagealigned if page-aligned data is not used. */
698 #define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
699 	. = ALIGN(PAGE_SIZE);						\
700 	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
701 		INIT_TASK(inittask)					\
702 		CACHELINE_ALIGNED_DATA(cacheline)			\
703 		READ_MOSTLY_DATA(cacheline)				\
704 		DATA_DATA						\
705 		CONSTRUCTORS						\
706 		NOSAVE_DATA						\
707 		PAGE_ALIGNED_DATA(pagealigned)				\
708 	}
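/*
 * Example usage from an architecture's vmlinux.lds.S (a sketch; the
 * alignment values are the architecture's choice):
 *
 *	_sdata = .;
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *	_edata = .;
 */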
709 
710 #define INIT_TEXT_SECTION(inittext_align)				\
711 	. = ALIGN(inittext_align);					\
712 	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
713 		VMLINUX_SYMBOL(_sinittext) = .;				\
714 		INIT_TEXT						\
715 		VMLINUX_SYMBOL(_einittext) = .;				\
716 	}
717 
718 #define INIT_DATA_SECTION(initsetup_align)				\
719 	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
720 		INIT_DATA						\
721 		INIT_SETUP(initsetup_align)				\
722 		INIT_CALLS						\
723 		CON_INITCALL						\
724 		SECURITY_INITCALL					\
725 		INIT_RAM_FS						\
726 	}
727 
728 #define BSS_SECTION(sbss_align, bss_align)				\
729 	SBSS								\
730 	BSS(bss_align)							\
731 	. = ALIGN(4);
732 
733