/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value
 * should change when the kernel is relocated, make the symbol section
 * relative and put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings need different RWX permissions
 * for the pages mapping the text proper and for the (freed) padding pages
 * at the end of the text section, so the identity mappings get split into
 * smaller pages. On 64-bit, kernel text and the kernel identity mappings
 * are separate, so we can enable protection checks as well as retain 2MB
 * large page mappings for kernel text.
 */
#define X64_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_RODATA_END		\
		. = ALIGN(HPAGE_SIZE);	\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_RODATA_BEGIN
#define X64_ALIGN_RODATA_END

#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		. = ALIGN(8);
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
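	/*
	 * The _text/_etext markers above (and _sdata/_edata below) are
	 * visible to C code through the usual extern declarations from
	 * asm/sections.h. A minimal usage sketch; the helper name is
	 * illustrative only, not something defined by this script:
	 *
	 *	extern char _text[], _etext[];
	 *
	 *	static bool addr_in_kernel_text(unsigned long addr)
	 *	{
	 *		return addr >= (unsigned long)_text &&
	 *		       addr <  (unsigned long)_etext;
	 *	}
	 */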
	X64_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which at run time can be
	 * patched with virtualization-friendly instructions or with
	 * bare-metal native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
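	/*
	 * A usage sketch of how entries end up here: the alternative()
	 * macro from asm/alternative.h records the original instruction,
	 * the replacement and a feature bit, and apply_alternatives()
	 * patches the text at boot when the bit is set. The instruction
	 * pair below is illustrative (it mirrors the historic 32-bit mb()
	 * definition):
	 *
	 *	alternative("lock; addl $0,0(%%esp)", "mfence",
	 *		    X86_FEATURE_XMM2);
	 */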
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section carries
	 * enough data (their address and length) to patch the kernel
	 * safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which at run time gets sorted according
	 * to dependency order. After rootfs_initcall has completed, this
	 * section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
	}
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
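/*
 * A sketch of why INIT_PER_CPU() exists: percpu symbols are zero-based
 * on 64-bit SMP, so early boot code addresses them through the
 * __per_cpu_load image copy before the percpu base is set up. An
 * illustrative excerpt in the spirit of head_64.S:
 *
 *	early_gdt_descr_base:
 *		.quad	init_per_cpu__gdt_page
 */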

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif
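/*
 * kexec_control_code_size is computed by the relocation stub itself;
 * relocate_kernel_{32,64}.S ends with roughly the following (an
 * illustrative excerpt - see those files for the authoritative form):
 *
 *	.globl kexec_control_code_size
 *	.set kexec_control_code_size, . - relocate_kernel
 */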