xref: /linux/include/asm-generic/vmlinux.lds.h (revision 606d099cdd1080bbb50ea50dc52d98252f8f10a1)
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

#define RODATA								\
	. = ALIGN(4096);						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	EH_FRAME							\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
									\
	. = ALIGN(4096);

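/*
 * Illustrative usage sketch (not part of this header): an architecture's
 * vmlinux.lds.S expands RODATA at the point in its SECTIONS block where the
 * read-only data should be emitted, e.g.
 *
 *	SECTIONS
 *	{
 *		...
 *		RODATA
 *		...
 *	}
 */
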
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

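/*
 * Sketch of intended use (an assumption based on the section name, not
 * stated in this file): the architecture linker script expands
 * SECURITY_INIT once in its init area so that security_initcall() entries
 * land between __security_initcall_start and __security_initcall_end, e.g.
 *
 *	SECURITY_INIT
 */
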
/* .sched.text is aligned to the function alignment to ensure we get the
 * same address even on the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* .spinlock.text is aligned to the function alignment to ensure we get the
 * same address even on the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

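/*
 * Illustrative sketch (not part of this header): SCHED_TEXT, LOCK_TEXT and
 * KPROBES_TEXT are meant to be expanded inside an architecture's .text
 * output section so the scheduler, spinlock and kprobes code ends up in
 * clearly delimited ranges, e.g.
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		*(.text)
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *	}
 */
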
#ifdef CONFIG_STACK_UNWIND
#define EH_FRAME							\
		/* Unwind data binary search table */			\
		. = ALIGN(8);						\
		.eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) {	\
			VMLINUX_SYMBOL(__start_unwind_hdr) = .;		\
			*(.eh_frame_hdr)				\
			VMLINUX_SYMBOL(__end_unwind_hdr) = .;		\
		}							\
		/* Unwind data */					\
		. = ALIGN(8);						\
		.eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {		\
			VMLINUX_SYMBOL(__start_unwind) = .;		\
			*(.eh_frame)					\
			VMLINUX_SYMBOL(__end_unwind) = .;		\
		}
#else
#define EH_FRAME
#endif

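/*
 * EH_FRAME is consumed by the RODATA macro above; without
 * CONFIG_STACK_UNWIND it expands to nothing, so no unwind sections are
 * emitted into the read-only data.
 */
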
		/* DWARF debug sections.
		Symbols in the DWARF debugging sections are relative to
		the beginning of the section so we begin them at 0.  */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

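/*
 * Illustrative sketch (not part of this header): DWARF_DEBUG and STABS_DEBUG
 * are normally expanded near the end of an architecture's SECTIONS block,
 * after the allocated sections, e.g.
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 */
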
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}

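/*
 * Sketch of intended use (an assumption, not stated in this file): on
 * architectures that select the generic BUG machinery, the linker script
 * references BUG_TABLE once so that the struct bug_entry records emitted
 * into the __bug_table section by BUG()/WARN_ON() are bounded by the
 * __start___bug_table/__stop___bug_table symbols consumed by lib/bug.c.
 */
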
#define NOTES								\
		.notes : { *(.note.*) } :note

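/*
 * Note on use (standard ld behaviour, not specific to this file): the
 * ":note" suffix assigns the .notes output section to a program header
 * named "note", so a script that expands NOTES also needs a matching
 * entry in its PHDRS command, e.g.
 *
 *	PHDRS
 *	{
 *		note PT_NOTE FLAGS(0);
 *	}
 */
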
#define INITCALLS							\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)
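
/*
 * Illustrative sketch (not part of this header): an architecture's linker
 * script wraps INITCALLS in the output section that collects the initcall
 * function pointers, typically something like
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		__initcall_start = .;
 *		INITCALLS
 *		__initcall_end = .;
 *	}
 *
 * which keeps the entries ordered by initcall level for do_initcalls().
 */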