/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample; architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 *               [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif
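
/*
 * Example (a sketch, not part of this header): an architecture that links
 * the kernel at a virtual address different from its load address defines
 * LOAD_OFFSET in its vmlinux.lds.S before including this file; x86_64 for
 * instance does roughly:
 *
 *	#define LOAD_OFFSET __START_KERNEL_map
 *	#include <asm-generic/vmlinux.lds.h>
 */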

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
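
/*
 * Example: with sec = init.text, CPU_KEEP(init.text) expands to
 * *(.cpuinit.text) and MEM_KEEP(init.text) to *(.meminit.text), i.e.
 * the input sections that __cpuinit/__meminit annotated code ends up
 * in, so hotplug-capable configs keep that code while others discard it.
 */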

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
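
/*
 * Example (a sketch): ftrace consumes the bracketing symbols from C as
 * ordinary array bounds, along the lines of what kernel/trace/ftrace.c
 * does at init time:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
 */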

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
			 *(__trace_printk_fmt) /* trace_printk fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() . = ALIGN(8);				\
			   VMLINUX_SYMBOL(__clksrc_of_table) = .;	\
			   *(__clksrc_of_table)				\
			   *(__clksrc_of_table_end)
#else
#define CLKSRC_OF_TABLES()
#endif

#ifdef CONFIG_IRQCHIP
#define IRQCHIP_OF_MATCH_TABLE()					\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__irqchip_begin) = .;				\
	*(__irqchip_of_table)						\
	*(__irqchip_of_end)
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif

#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES() . = ALIGN(8);				\
			VMLINUX_SYMBOL(__clk_of_table) = .;	\
			*(__clk_of_table)			\
			*(__clk_of_table_end)
#else
#define CLK_OF_TABLES()
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	*(.dtb.init.rodata)						\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	*(.data.unlikely)						\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___jump_table) = .;			\
	*(__jump_table)							\
	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(SORT(___ksymtab+*))					\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(SORT(___ksymtab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(SORT(___ksymtab_unused+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(SORT(___ksymtab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(SORT(___ksymtab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(SORT(___kcrctab+*))					\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(SORT(___kcrctab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(SORT(___kcrctab_unused+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(SORT(___kcrctab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(SORT(___kcrctab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		*(__modver)						\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
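
/*
 * Example (a sketch): an arch linker script typically pulls all of the
 * above in with a single line such as:
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * which page-aligns the read-only data and provides the
 * __start_rodata/__end_rodata markers used e.g. for write-protecting
 * the region.
 */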

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__entry_text_start) = .;			\
		*(.entry.text)						\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif
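
/*
 * Example (a sketch): a typical arch .text output section is composed
 * from the helpers above, roughly:
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		ENTRY_TEXT
 *		IRQENTRY_TEXT
 *	}
 */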

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}
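
/*
 * Example (a sketch): the C side sees this table through the bracketing
 * symbols; kernel/extable.c declares them along the lines of:
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 * and sorts and searches the entries between the two.
 */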

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	CPU_DISCARD(init.data)						\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	CLKSRC_OF_TABLES()						\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()

#define INIT_TEXT							\
	*(.init.text)							\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)						\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
		*(.initcall##level##.init)				\
		*(.initcall##level##s.init)				\

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		*(.initcallearly.init)					\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		VMLINUX_SYMBOL(__initcall_end) = .;
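
/*
 * Example: initcall functions land in these per-level sections via
 * __define_initcall() in <linux/init.h>; e.g. device_initcall(fn)
 * places a pointer to fn in .initcall6.init, so fn is run during the
 * level-6 pass of do_initcalls().
 */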

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	. = ALIGN(8);							\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..readmostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
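
/*
 * Example (a sketch): x86_64 SMP configurations place the percpu area
 * at vaddr 0 in a dedicated PHDR, roughly:
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 */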

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area.  This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}
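
/*
 * Example (a sketch): a typical arch invocation, e.g. as on arm, is
 * roughly:
 *
 *	RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. 32 byte cachelines, page-aligned .data..page_aligned and a
 * THREAD_SIZE-aligned init task.
 */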

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;
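
/*
 * Example (a sketch): with BSS_SECTION(0, 0, 0) in the arch linker
 * script, early startup code zeroes the region between the two
 * markers, along the lines of:
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	memset(__bss_start, 0, __bss_stop - __bss_start);
 */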
812