/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value
 * should change when the kernel is relocated, make the symbol
 * section-relative and put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * to the pages mapping to text and to the pages padding (which are freed)
 * the text section. Hence kernel identity mappings will be broken to
 * smaller pages. For 64-bit, kernel text and kernel identity mappings are
 * different, so we can enable protection checks as well as retain 2MB
 * large page mappings for kernel text.
 */
#define X64_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_RODATA_BEGIN
#define X64_ALIGN_RODATA_END

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
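	/*
	 * Illustration only (a sketch, not part of the build): C code
	 * consumes the section-relative markers defined above through
	 * asm/sections.h-style declarations, e.g.
	 *
	 *	extern char _text[], _etext[];
	 *	unsigned long text_bytes = _etext - _text;
	 *
	 * Because _text and _etext are defined inside a section, they are
	 * adjusted when the image is relocated; ABSOLUTE() values such as
	 * phys_startup_32/64 are not, which is exactly what the boot path
	 * wants for a physical entry address.
	 */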
	X64_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives,
	 * normally by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
							LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which during runtime
	 * can be patched with virtualization-friendly instructions or
	 * baremetal-native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
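	/*
	 * Illustration only (a sketch; asm/alternative.h is authoritative):
	 * a use site such as
	 *
	 *	alternative("call foo_generic", "call foo_fast",
	 *		    X86_FEATURE_ERMS);
	 *
	 * places the original instructions in .text, the replacement bytes
	 * in .altinstr_replacement below, and one struct alt_instr record
	 * here, carrying offsets to both plus their lengths, so that
	 * apply_alternatives() can patch the kernel at boot.
	 * foo_generic/foo_fast are hypothetical names.
	 */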
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to get their address and length, so the kernel can be
	 * patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which at run time gets sorted into
	 * dependency order. After rootfs_initcall has completed,
	 * this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
		*(__func_stack_frame_non_standard)
	}
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
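/*
 * Illustration only (a sketch; see asm/processor.h): the zero-based
 * per-cpu area set up by PERCPU_VADDR() above must begin with
 * irq_stack_union, because boot code and the stack-protector canary
 * address it at offset 0 from the per-cpu segment base. The C side is
 * declared roughly as
 *
 *	DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 */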
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif
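/*
 * Illustration only (a sketch, not part of this script):
 * kexec_control_code_size is an assembler symbol computed in
 * relocate_kernel_{32,64}.S, along the lines of
 *
 *	.globl kexec_control_code_size
 *	.set kexec_control_code_size, . - relocate_kernel
 *
 * Like the image-size checks above, the ASSERT() is evaluated purely at
 * link time and emits nothing into the output image.
 */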