Lines Matching +full:boot +full:- +full:pages

1 /* SPDX-License-Identifier: GPL-2.0 */
5 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
8 * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
24 #include <asm-generic/vmlinux.lds.h>
25 #include <asm/asm-offsets.h>
30 #include <asm/boot.h>
41 OUTPUT_ARCH(i386:x86-64)
51 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
55 * to the pages mapping the text and to the padding pages (which are freed) around the
57 * pages. For 64-bit, kernel text and kernel identity mappings are different,
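
The fragments above (file lines 51-57) give the rationale for the alignment: if section boundaries sit on 2MB (PMD) boundaries, per-section RWX differences never force the kernel's large-page text mapping to be split. A minimal sketch of the pattern, using illustrative constants rather than the file's actual macros:

        /* Sketch only: begin and end the read-only region on 2MB boundaries
         * so changing its permissions never splits a large-page mapping. */
        . = ALIGN(2 * 1024 * 1024);     /* PMD / large-page size on x86-64 */
        .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {
                *(.rodata) *(.rodata.*)
        }
        . = ALIGN(2 * 1024 * 1024);     /* the next section starts on its own large page */
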
73 * encryption operates on a page basis. Make this section PMD-aligned
74 * to avoid splitting the pages while mapping the section early.
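
This fragment (file lines 73-74) belongs to the comment over the decrypted-data machinery: the encryption attribute lives in page-table entries, so a region that must stay decrypted is bracketed by PMD_SIZE alignment to keep any one 2MB mapping uniformly encrypted or not. A hedged sketch of that bracketing (the file builds it via a macro; section and symbol names follow the usual .bss..decrypted convention):

        /* Sketch: PMD-align both ends so early page tables never need a
         * 2MB entry that would be half encrypted, half decrypted. */
        . = ALIGN(PMD_SIZE);
        __start_bss_decrypted = .;
        *(.bss..decrypted)
        . = ALIGN(PMD_SIZE);
        __end_bss_decrypted = .;
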
110 ASSERT(__relocate_kernel_end - __relocate_kernel_start <= KEXEC_CONTROL_CODE_MAX_SIZE,
125 phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
127 phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
130 /* Text and read-only data */
131 .text : AT(ADDR(.text) - LOAD_OFFSET) {
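
The AT(ADDR(...) - LOAD_OFFSET) idiom recurring through the rest of the script sets each section's load address (LMA) to its link-time virtual address (VMA) minus the kernel mapping offset; on 64-bit, LOAD_OFFSET is __START_KERNEL_map, so the resulting LMA is the physical load address. An illustrative instance (not a line from the file):

        /* VMA: where the section runs; LMA: where it is loaded.
         * Subtracting LOAD_OFFSET turns the virtual address back into
         * the physical one recorded in the program headers. */
        .example : AT(ADDR(.example) - LOAD_OFFSET) {
                *(.example)
        }
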
164 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
168 /* End of text section, which should occupy a whole number of pages */
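
The code under this comment presumably follows the usual mark-and-align pattern, along the lines of:

        /* Sketch: keep the end of text on a page boundary so the
         * RX -> RO permission change does not land mid-page. */
        _etext = .;
        . = ALIGN(PAGE_SIZE);
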
177 .data : AT(ADDR(.data) - LOAD_OFFSET) {
185 __top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;
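
The arithmetic encodes the initial stack layout: the boot stack pointer starts below the end of the init stack, leaving room for the top-of-stack padding and a struct pt_regs frame. Pictured with addresses growing upward:

        /*  __end_init_stack                         <- end of the init stack area
         *  [ TOP_OF_KERNEL_STACK_PADDING bytes ]
         *  [ PTREGS_SIZE bytes for struct pt_regs ]
         *  __top_init_kernel_stack                  <- initial stack pointer
         */
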
213 /* Init code and data - will be freed after init */
215 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
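
Everything from here to the matching .init.end section (file line 345 below) is handed back to the page allocator once boot completes; free_initmem() releases the span between the two marker symbols. A sketch of the bracketing, with the real init sections elided:

        .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
                __init_begin = .;       /* freed-after-init region starts here */
        }
        /* ... init text, data, altinstructions ... */
        .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
                __init_end = .;         /* freed-after-init region ends here */
        }
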
228 .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
234 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
241 .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
256 .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
263 .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
270 .call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
279 .ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
288 .cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
301 .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
312 .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
317 .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
328 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
332 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
337 ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot data too large")
345 .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
354 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
362 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
369 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
388 .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
402 * (_text - _end).
408 * such, it can only be used by very early boot code and must not be
411 * Currently used by SME for performing in-place encryption of the
412 * kernel during boot. Resides on a 2MB boundary to simplify the
413 * pagetable setup used for SME in-place encryption.
416 .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
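
Reading the comment at file lines 402-413 together with this header, the scratch area presumably looks roughly like the following, with HPAGE_SIZE supplying the 2MB alignment the comment calls for (symbol names follow the file's __init_scratch convention):

        . = ALIGN(HPAGE_SIZE);          /* 2MB, to simplify SME's page tables */
        .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
                __init_scratch_begin = .;
                *(.init.scratch)
                . = ALIGN(PAGE_SIZE);
                __init_scratch_end = .;
        }
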
459 ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
464 ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
469 ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
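
These checks enforce that nothing links against run-time relocation machinery, since the kernel has no dynamic linker to resolve such entries at run time. The companion pattern, explicitly collecting the suspect input sections so SIZEOF() can test them (safer than silently discarding them), looks roughly like:

        /* Gather would-be PLT entries, then refuse to link if any exist. */
        .plt : { *(.plt) *(.plt.*) *(.iplt) }
        ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
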
473 * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
482 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
486 /* needed for Clang - see arch/x86/entry/entry.S */
492 . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
496 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
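
Both assertions mask the symbol address with 0x3f, i.e. they test the low six bits and thus demand 64-byte (cacheline) alignment, since these mitigation thunks depend on exactly where their bytes fall within a cacheline. An equivalent illustrative spelling of the same test:

        /* Illustrative only: the modulo form is equivalent to & 0x3f == 0 */
        . = ASSERT((srso_safe_ret % 64) == 0, "srso_safe_ret not cacheline-aligned");
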
499 …* https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947…
501 * LLVM lld cannot do XOR until lld-17.
502 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
504 * Instead do: (A | B) - (A & B) in order to compute the XOR
507 . = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
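
The assertion is cut off at the match boundary. Applying the identity A ^ B = (A | B) - (A & B) from the comment above, the check presumably continues by comparing that difference against the single address bit in which the two SRSO routines must differ, roughly:

        /* Hedged reconstruction: XOR via (A | B) - (A & B), compared with an
         * assumed bit-22 constant that makes the two routines alias in the
         * branch predictor. */
        . = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
                    (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) << 20),
                   "SRSO function pair won't alias");
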
514 . = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
536 ABSOLUTE(xen_elfnote_phys32_entry) + ABSOLUTE(pvh_start_xen - LOAD_OFFSET);
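
A PVH guest starts without paging enabled, so the ELF note must publish a physical entry point, while pvh_start_xen is linked at a virtual address; subtracting LOAD_OFFSET performs the conversion, and ABSOLUTE() keeps ld from turning the terms into section-relative values. Stripped to its core, the conversion is just:

        /* Sketch: physical = virtual - LOAD_OFFSET (illustrative symbol) */
        example_pvh_entry_paddr = ABSOLUTE(pvh_start_xen - LOAD_OFFSET);
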