/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */

#define LOAD_OFFSET __START_KERNEL_map

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;
const_pcpu_hot = pcpu_hot;

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping text and for the padding pages (which are freed)
 * after the text section. Hence kernel identity mappings will be broken up
 * into smaller pages. For 64-bit, kernel text and kernel identity mappings
 * are different, so we can enable protection checks as well as retain 2MB
 * large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED					\
	. = ALIGN(PMD_SIZE);				\
	__start_bss_decrypted = .;			\
	*(.bss..decrypted);				\
	. = ALIGN(PAGE_SIZE);				\
	__start_bss_decrypted_unused = .;		\
	. = ALIGN(PMD_SIZE);				\
	__end_bss_decrypted = .;			\

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
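/*
 * A note on the AT(ADDR(section) - LOAD_OFFSET) idiom used for every
 * output section below (summarised here, the pattern itself is the
 * authority): sections are linked (VMA) at virtual addresses in the
 * __START_KERNEL_map region, while AT() sets each section's load address
 * (LMA) to that virtual address minus LOAD_OFFSET, i.e. the physical
 * address the section occupies at boot.  With the usual 64-bit layout, a
 * section linked at 0xffffffff81000000 gets the load address 0x1000000.
 * Symbols therefore resolve to virtual addresses while the program
 * headers describe where the loader must place the image.
 */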
SECTIONS
{
	. = __START_KERNEL;
#ifdef CONFIG_X86_32
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_MITIGATION_RETPOLINE
		*(.text..__x86.indirect_thunk)
		*(.text..__x86.return_thunk)
#endif
		STATIC_CALL_TEXT

		ALIGN_ENTRY_TEXT_BEGIN
		*(.text..__x86.rethunk_untrain)
		ENTRY_TEXT

#ifdef CONFIG_MITIGATION_SRSO
		/*
		 * See the comment above srso_alias_untrain_ret()'s
		 * definition.
		 */
		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text..__x86.rethunk_safe)
#endif
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)

	} :text = 0xcccccccc

	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

		/* equivalent to task_pt_regs(&init_task) */
		__top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif
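	/*
	 * A note on the zero-basing above (the rationale is inferred, not
	 * stated in this file): linking .data..percpu at virtual address 0
	 * makes every per-CPU symbol a plain offset from the per-CPU base,
	 * so generated code can reference it directly off %gs with no
	 * run-time relocation.  The INIT_PER_CPU() definitions at the end
	 * of this file rebase a few such symbols to __per_cpu_load so they
	 * are usable on the boot CPU before the per-CPU areas are set up.
	 */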
	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

#ifdef CONFIG_MITIGATION_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*().  These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}

	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}

	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif

#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif

#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif

	/*
	 * struct alt_inst entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to get their address and length, to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions.
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	RUNTIME_CONST_VARIABLES
	RUNTIME_CONST(ptr, USER_PTR_MAX)

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init;
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;
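	/*
	 * How the brk area below gets used (a sketch based on the setup
	 * code, not spelled out in this file): C code reserves space at
	 * build time with RESERVE_BRK(), which roughly places a suitably
	 * sized dummy object into .bss..brk, and extend_brk() then hands
	 * that memory out between __brk_base and __brk_limit during early
	 * boot.  The portion actually consumed is reserved again later as
	 * part of the kernel; see also the .init.scratch comment below.
	 */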
	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section:  Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since this section is located after
	 * __end_of_kernel_reserve it will be discarded and become part of the
	 * available memory. As such, it can only be used by very early boot
	 * code and must not be needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
#ifdef CONFIG_PROPELLER_CLANG
	.llvm_bb_addr_map : { *(.llvm_bb_addr_map) }
#endif

	ELF_DETAILS

	DISCARDS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized; it is safer to check this
	 * explicitly than to discard them blindly.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
           "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif
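/*
 * Background for the SRSO assert below (summarised; see the comment above
 * srso_alias_untrain_ret()'s definition for the authoritative story):
 * srso_alias_safe_ret is placed in .text at srso_alias_untrain_ret's
 * address with bits 2, 8, 14 and 20 ORed in, so the two addresses must
 * differ in exactly those bits for the pair to alias as intended.  Since
 * older linkers lack ^, the check computes the XOR as (A | B) - (A & B):
 * the AND bits are a subset of the OR bits, so the subtraction never
 * borrows and leaves exactly the bits that differ, i.e. A ^ B.
 */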
#ifdef CONFIG_MITIGATION_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR
 * of the two function addresses:
 */
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
		(ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
		"SRSO function pair won't alias");
#endif

#endif /* CONFIG_X86_64 */

/*
 * The symbols below are referenced using relative relocations in the
 * respective ELF notes. This produces build time constants that the
 * linker will never mark as relocatable. (Using just ABSOLUTE() is not
 * sufficient for that).
 */
#ifdef CONFIG_XEN_PV
xen_elfnote_entry_value =
	ABSOLUTE(xen_elfnote_entry) + ABSOLUTE(startup_xen);
#endif
#ifdef CONFIG_PVH
xen_elfnote_phys32_entry_value =
	ABSOLUTE(xen_elfnote_phys32_entry) + ABSOLUTE(pvh_start_xen - LOAD_OFFSET);
#endif
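/*
 * A sketch of how these *_value symbols are consumed (the exact note
 * definitions live in the corresponding head*.S files): the note payload
 * is assembled as a place-relative reference, something like
 *
 *	.long xen_elfnote_phys32_entry_value - xen_elfnote_phys32_entry
 *
 * so the ABSOLUTE(xen_elfnote_phys32_entry) term above cancels out and
 * the note ends up holding the plain physical address of pvh_start_xen,
 * with only a relative relocation ever being emitted.
 */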