xref: /linux/include/asm-generic/vmlinux.lds.h (revision 9ce7677cfd7cd871adb457c80bea3b581b839641)
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

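/*
 * Illustrative note (editor's sketch, not part of the original header): an
 * architecture's vmlinux.lds.S may override the two hooks above before
 * including this file.  The lines below are examples only; the x86_64-style
 * LOAD_OFFSET is taken as an assumption about that architecture's linker
 * script, and the underscore-prefixing VMLINUX_SYMBOL is the assumed form
 * for toolchains that prepend '_' to C symbol names.
 *
 *	#define LOAD_OFFSET	__START_KERNEL_map
 *	#define VMLINUX_SYMBOL(_sym_)	_##_sym_
 *	#include <asm-generic/vmlinux.lds.h>
 */
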
/* Align . to an 8-byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

#define RODATA								\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: version CRCs for normal symbols */	\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: version CRCs for GPL-only symbols */	\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}

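/*
 * Illustrative note (editor's sketch, not part of the original header): the
 * __start_xxx/__stop_xxx symbols emitted above let C code walk these tables
 * at run time.  For example, the built-in module parameters collected in
 * __param are consumed with a loop of roughly this shape (struct kernel_param
 * and its 'name' member are assumed from include/linux/moduleparam.h; the
 * function name is made up for illustration):
 *
 *	extern struct kernel_param __start___param[], __stop___param[];
 *
 *	static void __init walk_builtin_params(void)
 *	{
 *		struct kernel_param *p;
 *
 *		for (p = __start___param; p < __stop___param; p++)
 *			printk(KERN_DEBUG "builtin param: %s\n", p->name);
 *	}
 */
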
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}
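
/*
 * Illustrative note (editor's sketch, not part of the original header): the
 * security initcalls gathered above are invoked once at boot by walking the
 * bracketing symbols, along these lines (initcall_t is the kernel's
 * 'int (*)(void)' initcall pointer type; the function name is illustrative):
 *
 *	extern initcall_t __security_initcall_start[], __security_initcall_end[];
 *
 *	static void __init run_security_initcalls(void)
 *	{
 *		initcall_t *call;
 *
 *		for (call = __security_initcall_start;
 *		     call < __security_initcall_end; call++)
 *			(*call)();
 *	}
 */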

/* .sched.text is aligned to the function alignment to ensure we get the
 * same addresses even on the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;
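
/*
 * Illustrative note (editor's sketch, not part of the original header): the
 * bracketing symbols above (and the __lock_text_start/__lock_text_end pair
 * defined just below) let code such as the wchan logic ask whether an
 * address belongs to the scheduler or locking text.  A minimal address-range
 * check looks like this (the name mirrors the kernel's in_sched_functions()):
 *
 *	extern char __sched_text_start[], __sched_text_end[];
 *
 *	static int in_sched_text(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)__sched_text_start &&
 *		       addr <  (unsigned long)__sched_text_end;
 *	}
 */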

/* .spinlock.text is aligned to the function alignment to ensure we get the
 * same addresses even on the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
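
/*
 * Illustrative note (editor's sketch, not part of the original header):
 * functions marked __kprobes are collected into .kprobes.text, and the
 * kprobes core uses the bracketing symbols above to refuse probes placed
 * inside that region (probing it could recurse into the probe handlers).
 * The check is essentially the following (function name illustrative):
 *
 *	extern char __kprobes_text_start[], __kprobes_text_end[];
 *
 *	static int in_kprobes_text(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)__kprobes_text_start &&
 *		       addr <  (unsigned long)__kprobes_text_end;
 *	}
 */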

		/* DWARF debug sections.
		Symbols in the DWARF debugging sections are relative to
		the beginning of the section so we begin them at 0.  */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }
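
/*
 * Illustrative note (editor's sketch, not part of the original header): an
 * architecture linker script pulls these macros in roughly as follows.  The
 * skeleton is heavily trimmed and the addresses/section placement are
 * placeholders, not a real arch/.../kernel/vmlinux.lds.S:
 *
 *	#include <asm-generic/vmlinux.lds.h>
 *
 *	SECTIONS
 *	{
 *		. = PAGE_OFFSET + TEXT_OFFSET;
 *		.text : {
 *			*(.text)
 *			SCHED_TEXT
 *			LOCK_TEXT
 *			KPROBES_TEXT
 *		}
 *		RODATA
 *		SECURITY_INIT
 *		STABS_DEBUG
 *		DWARF_DEBUG
 *	}
 */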