#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8-byte boundary, which equals the maximum function
 * alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif


/* .data section */
#define DATA_DATA \
	*(.data) \
	*(.data.init.refok) \
	*(.ref.data) \
	DEV_KEEP(init.data) \
	DEV_KEEP(exit.data) \
	CPU_KEEP(init.data) \
	CPU_KEEP(exit.data) \
	MEM_KEEP(init.data) \
	MEM_KEEP(exit.data) \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___markers) = .; \
	*(__markers) \
	VMLINUX_SYMBOL(__stop___markers) = .;
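
/*
 * Illustrative sketch only (not part of this header): an architecture's
 * vmlinux.lds.S would typically pull DATA_DATA into its .data output
 * section roughly as below. The output section name and the use of
 * CONSTRUCTORS are per-arch choices and are shown here as assumptions:
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		DATA_DATA
 *		CONSTRUCTORS
 *	}
 */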

#define RO_DATA(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)		/* Kernel version magic */ \
		*(__markers_strings)	/* Markers: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	BUG_TABLE \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		*(.pci_fixup_resume) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		*(.pci_fixup_suspend) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		*(.builtin_fw) \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	} \
	\
	/* RapidIO route ops */ \
	.rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
		*(.rio_route_ops) \
		VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(__ksymtab) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(__ksymtab_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		*(__ksymtab_unused) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(__ksymtab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(__ksymtab_gpl_future) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(__kcrctab) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(__kcrctab_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		*(__kcrctab_unused) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(__kcrctab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(__kcrctab_gpl_future) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		DEV_KEEP(init.rodata) \
		DEV_KEEP(exit.rodata) \
		CPU_KEEP(init.rodata) \
		CPU_KEEP(exit.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
		. = ALIGN((align)); \
		VMLINUX_SYMBOL(__end_rodata) = .; \
	} \
	. = ALIGN((align));

/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)

#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}
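
/*
 * Illustrative sketch, not a definitive layout: an arch linker script
 * normally invokes RO_DATA() (or the legacy RODATA) after its text
 * sections, and places SECURITY_INIT among the init-time sections.
 * The PAGE_SIZE alignment below is an assumption; each arch passes its
 * own value:
 *
 *	RO_DATA(PAGE_SIZE)
 *	...
 *	SECURITY_INIT
 */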

/* .text section. Align to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text) \
	*(.ref.text) \
	*(.text.init.refok) \
	*(.exit.text.refok) \
	DEV_KEEP(init.text) \
	DEV_KEEP(exit.text) \
	CPU_KEEP(init.text) \
	CPU_KEEP(exit.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even in the second ld pass when generating System.map. */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even in the second ld pass when generating System.map. */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

/* init and exit section handling */
#define INIT_DATA \
	*(.init.data) \
	DEV_DISCARD(init.data) \
	DEV_DISCARD(init.rodata) \
	CPU_DISCARD(init.data) \
	CPU_DISCARD(init.rodata) \
	MEM_DISCARD(init.data) \
	MEM_DISCARD(init.rodata)

#define INIT_TEXT \
	*(.init.text) \
	DEV_DISCARD(init.text) \
	CPU_DISCARD(init.text) \
	MEM_DISCARD(init.text)

#define EXIT_DATA \
	*(.exit.data) \
	DEV_DISCARD(exit.data) \
	DEV_DISCARD(exit.rodata) \
	CPU_DISCARD(exit.data) \
	CPU_DISCARD(exit.rodata) \
	MEM_DISCARD(exit.data) \
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
	*(.exit.text) \
	DEV_DISCARD(exit.text) \
	CPU_DISCARD(exit.text) \
	MEM_DISCARD(exit.text)

/* DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section, so we begin them at 0. */
#define DWARF_DEBUG \
	/* DWARF 1 */ \
	.debug 0 : { *(.debug) } \
	.line 0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo 0 : { *(.debug_srcinfo) } \
	.debug_sfnames 0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges 0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	/* DWARF 2 */ \
	.debug_info 0 : { *(.debug_info \
		.gnu.linkonce.wi.*) } \
	.debug_abbrev 0 : { *(.debug_abbrev) } \
	.debug_line 0 : { *(.debug_line) } \
	.debug_frame 0 : { *(.debug_frame) } \
	.debug_str 0 : { *(.debug_str) } \
	.debug_loc 0 : { *(.debug_loc) } \
	.debug_macinfo 0 : { *(.debug_macinfo) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames 0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }
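
/*
 * Illustrative only (assumed per-arch usage): the debug macros are
 * conventionally emitted near the end of an arch vmlinux.lds.S, after
 * all loadable sections, e.g.
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 */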

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		__start___bug_table = .; \
		*(__bug_table) \
		__stop___bug_table = .; \
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		__tracedata_start = .; \
		*(.tracedata) \
		__tracedata_end = .; \
	}
#else
#define TRACEDATA
#endif

#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		*(.note.*) \
		VMLINUX_SYMBOL(__stop_notes) = .; \
	}

#define INITCALLS \
	*(.initcall0.init) \
	*(.initcall0s.init) \
	*(.initcall1.init) \
	*(.initcall1s.init) \
	*(.initcall2.init) \
	*(.initcall2s.init) \
	*(.initcall3.init) \
	*(.initcall3s.init) \
	*(.initcall4.init) \
	*(.initcall4s.init) \
	*(.initcall5.init) \
	*(.initcall5s.init) \
	*(.initcallrootfs.init) \
	*(.initcall6.init) \
	*(.initcall6s.init) \
	*(.initcall7.init) \
	*(.initcall7s.init)

#define PERCPU(align) \
	. = ALIGN(align); \
	__per_cpu_start = .; \
	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
		*(.data.percpu) \
		*(.data.percpu.shared_aligned) \
	} \
	__per_cpu_end = .;
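
/*
 * Illustrative sketch, not a definitive layout: a typical arch
 * vmlinux.lds.S wires the initcall and per-cpu macros up roughly as
 * below. The enclosing .initcall.init section and the 4096-byte
 * alignment passed to PERCPU() are assumptions; each arch picks its
 * own alignment:
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		__initcall_start = .;
 *		INITCALLS
 *		__initcall_end = .;
 *	}
 *	PERCPU(4096)
 */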