/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols unless you know that the symbol's value
 * should remain constant even if the kernel image is relocated at run
 * time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative
 * and put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings will have different RWX
 * permissions for the pages mapping the text and for the padding pages
 * (which are freed) at the end of the text section, so the identity
 * mappings will be broken up into smaller pages. On 64-bit, kernel text
 * and kernel identity mappings are separate, so we can enable protection
 * checks as well as retain 2MB large page mappings for kernel text.
 */
#define X64_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_RODATA_END		\
	. = ALIGN(HPAGE_SIZE);		\
	__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_RODATA_BEGIN
#define X64_ALIGN_RODATA_END

#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090
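	/*
	 * Worked example of the VMA/LMA split set up above (a sketch,
	 * assuming the common 64-bit defaults __START_KERNEL_map =
	 * 0xffffffff80000000 and CONFIG_PHYSICAL_START = 0x1000000):
	 *
	 *   ADDR(.text)                    = 0xffffffff81000000  (link/virtual)
	 *   AT(ADDR(.text) - LOAD_OFFSET)  = 0x0000000001000000  (load/physical)
	 *
	 * The program headers thus carry physical load addresses for the
	 * boot loader, while all symbol references resolve against the high
	 * virtual mapping.
	 */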

	/* .text should occupy whole number of pages */
	. = ALIGN(PAGE_SIZE);
	X64_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data


	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run.
	 * All references to such code must be patched out by alternatives,
	 * normally by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which can be patched at
	 * runtime with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
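	/*
	 * For reference, each .altinstructions entry is roughly the
	 * following (a simplified sketch -- the exact struct alt_instr
	 * layout varies by kernel version; see
	 * arch/x86/include/asm/alternative.h for the real definition):
	 *
	 *   struct alt_instr {
	 *           s32 instr_offset;    relative offset of the original insn
	 *           s32 repl_offset;     relative offset into .altinstr_replacement
	 *           u16 cpuid;           X86_FEATURE_* bit selecting the patch
	 *           u8  instrlen;        length of the original instruction(s)
	 *           u8  replacementlen;  length of the replacement
	 *   };
	 *
	 * The relative (s32) offsets are what let these entries live in an
	 * init section and still refer to .text after relocation.
	 */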
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks them
	 * in as binary blobs. The .altinstructions section has enough data
	 * to get their address and length, so the kernel can be patched
	 * safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which gets sorted at run time depending
	 * on their dependency order. After rootfs_initcall is complete
	 * this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif
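/*
 * As a concrete example, INIT_PER_CPU(gdt_page) above expands to:
 *
 *   init_per_cpu__gdt_page = gdt_page + __per_cpu_load;
 *
 * Per-cpu symbols are linked zero-based on 64-bit SMP (see the
 * PERCPU_VADDR() use above), so adding __per_cpu_load converts the
 * zero-based offset back into the address of the boot CPU's copy inside
 * the kernel image, usable before %gs-based per-cpu access is set up.
 */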

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif
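/*
 * A quick way to sanity-check the segment layout produced by this script
 * (a suggested manual check, not part of the build):
 *
 *   $ readelf -lSW vmlinux
 *
 * The PT_LOAD entries should mirror the text/data/percpu/init PHDRS
 * declared above, including their R/W/E flags, and the p_paddr column
 * should show the LOAD_OFFSET-adjusted physical addresses computed by
 * the AT() expressions.
 */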