/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
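
/*
 * The __start_xxx/__stop_xxx pairs emitted by helpers such as
 * MCOUNT_REC() are consumed from C by declaring matching extern
 * arrays. A minimal sketch of iterating the mcount records (the
 * extern declarations mirror what ftrace does; process_entry() is
 * a hypothetical consumer):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static void walk_mcount_records(void)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *			process_entry(*p);	// hypothetical helper
 *	}
 */
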
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;	\
				*(_ftrace_branch)				\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);					\
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .;	\
				*(_kprobe_blacklist)				\
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
			*(_ftrace_enum_map)				\
			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;	\
			 *(__trace_printk_fmt) /* trace_printk() format strings */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			 *(__tracepoint_str) /* tracepoint string pointers */ \
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif


#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
	*(__##name##_of_table)						\
	*(__##name##_of_table_end)

#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
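
/*
 * A worked expansion may help here. Assuming CONFIG_COMMON_CLK=y, so
 * that config_enabled(CONFIG_COMMON_CLK) evaluates to 1:
 *
 *	CLK_OF_TABLES()
 *	 -> OF_TABLE(CONFIG_COMMON_CLK, clk)
 *	 -> __OF_TABLE(config_enabled(CONFIG_COMMON_CLK), clk)
 *	 -> ___OF_TABLE(1, clk)	// the extra level forces the 1 to be
 *				// evaluated before token pasting
 *	 -> _OF_TABLE_1(clk)
 *	 -> . = ALIGN(8);
 *	    VMLINUX_SYMBOL(__clk_of_table) = .;
 *	    *(__clk_of_table)
 *	    *(__clk_of_table_end)
 *
 * With the option disabled the chain ends in _OF_TABLE_0(clk), which
 * expands to nothing, so no table output is emitted.
 */
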
#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	*(.dtb.init.rodata)						\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	*(.data.unlikely)						\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___jump_table) = .;			\
	*(__jump_table)							\
	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)	/* Kernel version magic */		\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		*(.pci_fixup_suspend_late)				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(SORT(___ksymtab+*))					\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(SORT(___ksymtab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(SORT(___ksymtab_unused+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(SORT(___ksymtab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(SORT(___ksymtab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(SORT(___kcrctab+*))					\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(SORT(___kcrctab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(SORT(___kcrctab_unused+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(SORT(___kcrctab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(SORT(___kcrctab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		*(__modver)						\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
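
/*
 * The __ksymtab tables above are populated by EXPORT_SYMBOL() and
 * walked by the module loader; SORT keeps the input sections ordered
 * by name so the loader can binary-search them. A minimal linear
 * sketch of a lookup, assuming the struct kernel_symbol layout of
 * this kernel generation (value plus name pointer):
 *
 *	extern const struct kernel_symbol __start___ksymtab[];
 *	extern const struct kernel_symbol __stop___ksymtab[];
 *
 *	static const struct kernel_symbol *find_exported(const char *name)
 *	{
 *		const struct kernel_symbol *s;
 *
 *		for (s = __start___ksymtab; s < __stop___ksymtab; s++)
 *			if (strcmp(s->name, name) == 0)
 *				return s;
 *		return NULL;	// in-tree code uses bsearch() instead
 *	}
 */
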
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text .text.fixup)					\
		*(.ref.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even after the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even after the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__entry_text_start) = .;			\
		*(.entry.text)						\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}
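
/*
 * At fault time the kernel searches [__start___ex_table,
 * __stop___ex_table) for an entry matching the faulting instruction.
 * A minimal sketch, assuming the classic two-address entry layout
 * (several architectures instead store relative offsets):
 *
 *	struct exception_table_entry {
 *		unsigned long insn, fixup;	// fault site, recovery address
 *	};
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	static unsigned long search_fixup(unsigned long addr)
 *	{
 *		struct exception_table_entry *e;
 *
 *		for (e = __start___ex_table; e < __stop___ex_table; e++)
 *			if (e->insn == addr)
 *				return e->fixup;	// resume here
 *		return 0;				// genuine fault
 *	}
 */
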
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			*(SORT(.init_array.*))		   \
			*(.init_array)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	CLKSRC_OF_TABLES()						\
	IOMMU_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	EARLYCON_OF_TABLES()

#define INIT_TEXT							\
	*(.init.text)							\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)
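
/*
 * Code lands in these input sections via the section attributes behind
 * the __init/__exit markers from <linux/init.h>. A minimal sketch of a
 * driver using them (foo_setup() is a hypothetical helper):
 *
 *	#include <linux/init.h>
 *	#include <linux/module.h>
 *
 *	static int __init foo_init(void)	// emitted into .init.text
 *	{
 *		return foo_setup();		// hypothetical helper
 *	}
 *
 *	static void __exit foo_exit(void)	// emitted into .exit.text
 *	{
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 *
 * For built-in code the .exit.text section is normally dropped via
 * DISCARDS below, unless the architecture has to keep it around.
 */
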
/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
	}
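
/*
 * Nothing zeroes these sections for the kernel image itself, so early
 * boot code clears [__bss_start, __bss_stop) by hand; the symbols are
 * placed by BSS_SECTION() near the end of this file. A minimal sketch
 * of what architectures typically do in their early setup:
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	static void __init clear_bss(void)
 *	{
 *		memset(__bss_start, 0, __bss_stop - __bss_start);
 *	}
 */
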
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)						\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
		*(.initcall##level##.init)				\
		*(.initcall##level##s.init)				\

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		*(.initcallearly.init)					\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		VMLINUX_SYMBOL(__initcall_end) = .;
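
/*
 * Because the per-level input sections are laid out in order, the boot
 * code can run every initcall in level order with one pass over the
 * whole range. A simplified sketch of the consuming loop (the real
 * code in init/main.c iterates per level using the
 * __initcall<level>_start markers and also logs each call):
 *
 *	typedef int (*initcall_t)(void);
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *
 *	static void __init run_initcalls(void)
 *	{
 *		initcall_t *fn;
 *
 *		for (fn = __initcall_start; fn < __initcall_end; fn++)
 *			(*fn)();	// early, then levels 0-7 in order
 *	}
 */
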
#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	. = ALIGN(8);							\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area.  This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (if not always) smaller than a PAGE_SIZE,
 * so the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;
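
/*
 * For completeness, a sketch of how the .data..percpu image defined
 * above is populated and consumed from C via the usual helpers from
 * <linux/percpu.h> (my_counter is a hypothetical variable). At boot,
 * setup_per_cpu_areas() copies [__per_cpu_start, __per_cpu_end) into
 * each CPU's own chunk:
 *
 *	#include <linux/percpu.h>
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter); // in .data..percpu
 *
 *	static void bump(void)
 *	{
 *		this_cpu_inc(my_counter);  // touches this CPU's copy only
 *	}
 */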