/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA(PAGE_SIZE)
 *	RW_DATA(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 *               [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/*
 * Only some architectures want to have the .notes segment visible in
 * a separate PT_NOTE ELF Program Header. When this happens, it needs
 * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
 * Program Headers. In this case, though, the PT_LOAD needs to be made
 * the default again so that all the following sections don't also end
 * up in the PT_NOTE Program Header.
 */
#ifdef EMITS_PT_NOTE
#define NOTES_HEADERS		:text :note
#define NOTES_HEADERS_RESTORE	__restore_ph : { *(.__restore_ph) } :text
#else
#define NOTES_HEADERS
#define NOTES_HEADERS_RESTORE
#endif

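/*
 * For illustration (a sketch, not lifted from any particular
 * architecture): an arch that wants the separate PT_NOTE defines
 * EMITS_PT_NOTE before including this file and declares both program
 * headers in its vmlinux.lds.S, e.g. with FLAGS(7) = PF_R|PF_W|PF_X:
 *
 *	#define EMITS_PT_NOTE
 *	#include <asm-generic/vmlinux.lds.h>
 *
 *	PHDRS {
 *		text PT_LOAD FLAGS(7);
 *		note PT_NOTE FLAGS(0);
 *	}
 *
 * NOTES then emits .notes into both ":text :note", and
 * NOTES_HEADERS_RESTORE makes :text the default again for whatever
 * output section comes next.
 */
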
/*
 * Some architectures have non-executable read-only exception tables.
 * They can be added to the RO_DATA segment by specifying their desired
 * alignment.
 */
#ifdef RO_EXCEPTION_TABLE_ALIGN
#define RO_EXCEPTION_TABLE	EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
#else
#define RO_EXCEPTION_TABLE
#endif

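/*
 * For example (a sketch; the 16-byte alignment is only an assumption
 * for the arch at hand), an architecture opts in by defining the
 * alignment before including this header:
 *
 *	#define RO_EXCEPTION_TABLE_ALIGN	16
 *	#include <asm-generic/vmlinux.lds.h>
 *
 * RO_DATA() then emits __ex_table inside the read-only segment and the
 * arch drops its separate EXCEPTION_TABLE() invocation.
 */
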
/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * The LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections
 * and -fdata-sections, which generate .text.identifier and
 * .data.identifier sections that need to be pulled in with .text and
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif

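/*
 * To illustrate the globs above (symbol names are hypothetical): with
 * -ffunction-sections/-fdata-sections the compiler emits
 *
 *	int foo(void) { ... }	->	.text.foo  (matched by TEXT_MAIN)
 *	int bar = 1;		->	.data.bar  (matched by DATA_MAIN)
 *
 * while kernel-defined special sections use a double dot, e.g.
 * .text..refcount and .data..ro_after_init, which the single-dot
 * patterns deliberately leave alone.
 */
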
/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The ftrace call sites are logged to a section whose name depends on the
 * compiler option used. A given kernel image will only use one, AKA
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 */
#define MCOUNT_REC()	. = ALIGN(8);				\
			__start_mcount_loc = .;			\
			KEEP(*(__mcount_loc))			\
			KEEP(*(__patchable_function_entries))	\
			__stop_mcount_loc = .;
#else
#define MCOUNT_REC()
#endif

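/*
 * The bracketing symbols become ordinary extern arrays on the C side.
 * A simplified sketch of how ftrace consumes them at boot (see
 * ftrace_init() in kernel/trace/ftrace.c):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
 */
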
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	__start_annotated_branch_profile = .;	\
				KEEP(*(_ftrace_annotated_branch))	\
				__stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	__start_branch_profile = .;		\
				KEEP(*(_ftrace_branch))			\
				__stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				      \
				__start_kprobe_blacklist = .;		      \
				KEEP(*(_kprobe_blacklist))		      \
				__stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			      \
			__start_error_injection_whitelist = .;		      \
			KEEP(*(_error_injection_whitelist))		      \
			__stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			__start_ftrace_events = .;			\
			KEEP(*(_ftrace_events))				\
			__stop_ftrace_events = .;			\
			__start_ftrace_eval_maps = .;			\
			KEEP(*(_ftrace_eval_map))			\
			__stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 __start___trace_bprintk_fmt = .;      \
			 KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
			 __stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .;	\
			 KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
			 __stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 __start_syscalls_metadata = .;			\
			 KEEP(*(__syscalls_metadata))			\
			 __stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN();					\
			 __start__bpf_raw_tp = .;			\
			 KEEP(*(__bpf_raw_tp_map))			\
			 __stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8);				\
			 __earlycon_table = .;			\
			 KEEP(*(__earlycon_table))		\
			 __earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif

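/*
 * Drivers populate __earlycon_table from C; the linker merely collects
 * the entries between the bracketing symbols. A sketch using
 * OF_EARLYCON_DECLARE() from <linux/serial_core.h> (driver name,
 * compatible string and setup function are illustrative):
 *
 *	static int __init foo_early_setup(struct earlycon_device *dev,
 *					  const char *opt)
 *	{
 *		...
 *	}
 *	OF_EARLYCON_DECLARE(foo_uart, "acme,foo-uart", foo_early_setup);
 */
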
#ifdef CONFIG_SECURITY
#define LSM_TABLE()	. = ALIGN(8);					\
			__start_lsm_info = .;				\
			KEEP(*(.lsm_info.init))				\
			__end_lsm_info = .;
#define EARLY_LSM_TABLE()	. = ALIGN(8);				\
			__start_early_lsm_info = .;			\
			KEEP(*(.early_lsm_info.init))			\
			__end_early_lsm_info = .;
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	__##name##_of_table = .;					\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

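/*
 * A worked expansion for clarity: with CONFIG_IRQCHIP=y,
 * IS_ENABLED(CONFIG_IRQCHIP) evaluates to 1, so
 * IRQCHIP_OF_MATCH_TABLE() resolves through _OF_TABLE_1(irqchip) to:
 *
 *	. = ALIGN(8);
 *	__irqchip_of_table = .;
 *	KEEP(*(__irqchip_of_table))
 *	KEEP(*(__irqchip_of_table_end))
 *
 * With the option disabled it expands to nothing via _OF_TABLE_0().
 */
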
#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	__##name##_acpi_probe_table = .;				\
	KEEP(*(__##name##_acpi_probe_table))				\
	__##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name)						\
	. = ALIGN(8);							\
	__##name##_thermal_table = .;					\
	KEEP(*(__##name##_thermal_table))				\
	__##name##_thermal_table_end = .;
#else
#define THERMAL_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	__dtb_start = .;						\
	KEEP(*(.dtb.init.rodata))					\
	__dtb_end = .;

/*
 * .data section
 */
#define DATA_DATA							\
	*(.xiptext)							\
	*(DATA_MAIN)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data*)						\
	MEM_KEEP(exit.data*)						\
	*(.data.unlikely)						\
	__start_once = .;						\
	*(.data.once)							\
	__end_once = .;							\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	__start___verbose = .;						\
	KEEP(*(__verbose))						\
	__stop___verbose = .;						\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	BPF_RAW_TP()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_begin = .;						\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_end = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	__start_init_task = .;						\
	init_thread_union = .;						\
	init_stack = .;							\
	KEEP(*(.data..init_task))					\
	KEEP(*(.data..init_thread_info))				\
	. = __start_init_task + THREAD_SIZE;				\
	__end_init_task = .;

#define JUMP_TABLE_DATA							\
	. = ALIGN(8);							\
	__start___jump_table = .;					\
	KEEP(*(__jump_table))						\
	__stop___jump_table = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	__start_ro_after_init = .;					\
	*(.data..ro_after_init)						\
	JUMP_TABLE_DATA							\
	__end_ro_after_init = .;
#endif

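/*
 * C code targets .data..ro_after_init through the __ro_after_init
 * attribute from <linux/cache.h>: such objects stay writable during
 * boot and are sealed read-only once init completes. A sketch (the
 * variable and probe function are hypothetical):
 *
 *	static unsigned long foo_feature_mask __ro_after_init;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_feature_mask = foo_probe_features(); // last legal write
 *		return 0;
 *	}
 */
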
/*
 * Read only Data
 */
#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		__start_rodata = .;					\
		*(.rodata) *(.rodata.*)					\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		. = ALIGN(8);						\
		__start___tracepoints_ptrs = .;				\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		__stop___tracepoints_ptrs = .;				\
		*(__tracepoints_strings) /* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		__start_pci_fixups_early = .;				\
		KEEP(*(.pci_fixup_early))				\
		__end_pci_fixups_early = .;				\
		__start_pci_fixups_header = .;				\
		KEEP(*(.pci_fixup_header))				\
		__end_pci_fixups_header = .;				\
		__start_pci_fixups_final = .;				\
		KEEP(*(.pci_fixup_final))				\
		__end_pci_fixups_final = .;				\
		__start_pci_fixups_enable = .;				\
		KEEP(*(.pci_fixup_enable))				\
		__end_pci_fixups_enable = .;				\
		__start_pci_fixups_resume = .;				\
		KEEP(*(.pci_fixup_resume))				\
		__end_pci_fixups_resume = .;				\
		__start_pci_fixups_resume_early = .;			\
		KEEP(*(.pci_fixup_resume_early))			\
		__end_pci_fixups_resume_early = .;			\
		__start_pci_fixups_suspend = .;				\
		KEEP(*(.pci_fixup_suspend))				\
		__end_pci_fixups_suspend = .;				\
		__start_pci_fixups_suspend_late = .;			\
		KEEP(*(.pci_fixup_suspend_late))			\
		__end_pci_fixups_suspend_late = .;			\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		__start_builtin_fw = .;					\
		KEEP(*(.builtin_fw))					\
		__end_builtin_fw = .;					\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		__start___ksymtab = .;					\
		KEEP(*(SORT(___ksymtab+*)))				\
		__stop___ksymtab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		__start___ksymtab_gpl = .;				\
		KEEP(*(SORT(___ksymtab_gpl+*)))				\
		__stop___ksymtab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		__start___ksymtab_unused = .;				\
		KEEP(*(SORT(___ksymtab_unused+*)))			\
		__stop___ksymtab_unused = .;				\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		__start___ksymtab_unused_gpl = .;			\
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
		__stop___ksymtab_unused_gpl = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		__start___ksymtab_gpl_future = .;			\
		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
		__stop___ksymtab_gpl_future = .;			\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		__start___kcrctab = .;					\
		KEEP(*(SORT(___kcrctab+*)))				\
		__stop___kcrctab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		__start___kcrctab_gpl = .;				\
		KEEP(*(SORT(___kcrctab_gpl+*)))				\
		__stop___kcrctab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		__start___kcrctab_unused = .;				\
		KEEP(*(SORT(___kcrctab_unused+*)))			\
		__stop___kcrctab_unused = .;				\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		__start___kcrctab_unused_gpl = .;			\
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
		__stop___kcrctab_unused_gpl = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		__start___kcrctab_gpl_future = .;			\
		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
		__stop___kcrctab_gpl_future = .;			\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		__start___param = .;					\
		KEEP(*(__param))					\
		__stop___param = .;					\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		__start___modver = .;					\
		KEEP(*(__modver))					\
		__stop___modver = .;					\
	}								\
									\
	RO_EXCEPTION_TABLE						\
	NOTES								\
									\
	. = ALIGN((align));						\
	__end_rodata = .;

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
		*(.text..refcount)					\
		*(.ref.text)						\
	MEM_KEEP(init.text*)						\
	MEM_KEEP(exit.text*)

/* sched.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		__sched_text_start = .;					\
		*(.sched.text)						\
		__sched_text_end = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		__lock_text_start = .;					\
		*(.spinlock.text)					\
		__lock_text_end = .;

#define CPUIDLE_TEXT							\
		ALIGN_FUNCTION();					\
		__cpuidle_text_start = .;				\
		*(.cpuidle.text)					\
		__cpuidle_text_end = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		__kprobes_text_start = .;				\
		*(.kprobes.text)					\
		__kprobes_text_end = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__entry_text_start = .;					\
		*(.entry.text)						\
		__entry_text_end = .;

#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__irqentry_text_start = .;				\
		*(.irqentry.text)					\
		__irqentry_text_end = .;

#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		__softirqentry_text_start = .;				\
		*(.softirqentry.text)					\
		__softirqentry_text_end = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  KEEP(*(.head.text))

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

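/*
 * Boot code is placed in .head.text via the __HEAD marker from
 * <linux/init.h>. A sketch of typical arch startup assembly (the
 * label is illustrative):
 *
 *	#include <linux/init.h>
 *	#include <linux/linkage.h>
 *
 *		__HEAD
 *	ENTRY(stext)
 *		... early boot, typically before the MMU is enabled ...
 *	END(stext)
 */
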
/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		__start___ex_table = .;					\
		KEEP(*(__ex_table))					\
		__stop___ex_table = .;					\
	}

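/*
 * The fault-handling code walks the table via the bracketing symbols.
 * A simplified sketch of the C side (see kernel/extable.c):
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	e = search_extable(__start___ex_table,
 *			   __stop___ex_table - __start___ex_table, addr);
 */
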
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			__ctors_start = .;		   \
			KEEP(*(.ctors))			   \
			KEEP(*(SORT(.init_array.*)))	   \
			KEEP(*(.init_array))		   \
			__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data init.data.*)					\
	MEM_DISCARD(init.data*)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata .init.rodata.*)					\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	ERROR_INJECT_WHITELIST()					\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	TIMER_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(timer)						\
	THERMAL_TABLE(governor)						\
	EARLYCON_TABLE()						\
	LSM_TABLE()							\
	EARLY_LSM_TABLE()

#define INIT_TEXT							\
	*(.init.text .init.text.*)					\
	*(.text.startup)						\
	MEM_DISCARD(init.text*)

#define EXIT_DATA							\
	*(.exit.data .exit.data.*)					\
	*(.fini_array .fini_array.*)					\
	*(.dtors .dtors.*)						\
	MEM_DISCARD(exit.data*)						\
	MEM_DISCARD(exit.rodata*)

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.dynsbss)						\
		*(SBSS_MAIN)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(BSS_MAIN)						\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		.debug_pubtypes 0 : { *(.debug_pubtypes) }		\
		/* DWARF 3 */						\
		.debug_ranges	0 : { *(.debug_ranges) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\
		/* GNU DWARF 2 extensions */				\
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }	\
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }	\
		/* DWARF 4 */						\
		.debug_types	0 : { *(.debug_types) }			\
		/* DWARF 5 */						\
		.debug_macro	0 : { *(.debug_macro) }			\
		.debug_addr	0 : { *(.debug_addr) }

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		__start___bug_table = .;				\
		KEEP(*(__bug_table))					\
		__stop___bug_table = .;					\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE						\
	. = ALIGN(4);							\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {	\
		__start_orc_unwind_ip = .;				\
		KEEP(*(.orc_unwind_ip))					\
		__stop_orc_unwind_ip = .;				\
	}								\
	. = ALIGN(2);							\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
		__start_orc_unwind = .;					\
		KEEP(*(.orc_unwind))					\
		__stop_orc_unwind = .;					\
	}								\
	. = ALIGN(4);							\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {		\
		orc_lookup = .;						\
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) /	\
			LOOKUP_BLOCK_SIZE) + 1) * 4;			\
		orc_lookup_end = .;					\
	}
#else
#define ORC_UNWIND_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		__tracedata_start = .;					\
		KEEP(*(.tracedata))					\
		__tracedata_end = .;					\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		__start_notes = .;					\
		KEEP(*(.note.*))					\
		__stop_notes = .;					\
	} NOTES_HEADERS							\
	NOTES_HEADERS_RESTORE

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		__setup_start = .;					\
		KEEP(*(.init.setup))					\
		__setup_end = .;

#define INIT_CALLS_LEVEL(level)						\
		__initcall##level##_start = .;				\
		KEEP(*(.initcall##level##.init))			\
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS							\
		__initcall_start = .;					\
		KEEP(*(.initcallearly.init))				\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		__initcall_end = .;

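/*
 * Every level's start symbol doubles as the previous level's end, so
 * init/main.c can walk one level at a time. A simplified sketch (the
 * real code also copes with PREL32 relocation entries):
 *
 *	extern initcall_t __initcall0_start[], __initcall1_start[];
 *	initcall_t *fn;
 *
 *	for (fn = __initcall0_start; fn < __initcall1_start; fn++)
 *		(*fn)();
 */
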
#define CON_INITCALL							\
		__con_initcall_start = .;				\
		KEEP(*(.con_initcall.init))				\
		__con_initcall_end = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	__initramfs_start = .;						\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION					\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..decrypted)					\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	*(.modinfo)							\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	__per_cpu_start = .;						\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	PERCPU_DECRYPTED_SECTION					\
	__per_cpu_end = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	__per_cpu_load = .;						\
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) {	\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = __per_cpu_load + SIZEOF(.data..percpu);

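/*
 * For example, an architecture that maps the percpu area at virtual
 * address 0 in its own program header (x86_64 has used this form)
 * writes:
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * Note the leading colon on the PHDR argument, and remember the PHDR
 * stickiness described above: the next output section must name its
 * PHDR explicitly.
 */
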
/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area.  This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		__per_cpu_load = .;					\
		PERCPU_INPUT(cacheline)					\
	}

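/*
 * Most architectures use this simple form in their vmlinux.lds.S,
 * passing only the arch cacheline size, e.g.:
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 */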

/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically smaller than a PAGE_SIZE, so the sections
 * that have this (or a similar) restriction are located before the
 * ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used.
 */
#define RW_DATA(cacheline, pagealigned, inittask)			\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE

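/*
 * A typical instantiation might look like (values are an example; each
 * arch passes its own constants):
 *
 *	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned data at the arch cacheline size, page-aligned
 * data at PAGE_SIZE, and the initial task at THREAD_SIZE alignment.
 */
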
#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		_sinittext = .;						\
		INIT_TEXT						\
		_einittext = .;						\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	__bss_start = .;						\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	__bss_stop = .;
1027