/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols unless you know that the symbol value
 * should remain constant even if the kernel image is relocated at run
 * time. Absolute symbols are not relocated. If a symbol's value must
 * change when the kernel is relocated, make the symbol section-relative
 * by putting its definition inside the section definition.
 */
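/*
 * For illustration (with a hypothetical section and symbols): a marker
 * defined inside a section body, e.g.
 *
 *	.example : { __example_start = .; *(.example) __example_end = .; }
 *
 * is section-relative and moves with the image, whereas a top-level
 * assignment such as "__example_abs = 0x100000;" creates an absolute
 * symbol that relocation leaves untouched.
 */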

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
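
/*
 * LOAD_OFFSET is the difference between the kernel's virtual link
 * addresses and its physical load addresses; the output sections below
 * use AT(ADDR(section) - LOAD_OFFSET) to derive each section's load
 * address from its link-time virtual address.
 */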

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(7);          /* RWE */
	data.init PT_LOAD FLAGS(7);     /* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);    /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
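
/*
 * FLAGS() takes the ELF p_flags bits (PF_X = 1, PF_W = 2, PF_R = 4),
 * so FLAGS(5) marks a segment read + execute and FLAGS(7) read + write
 * + execute, matching the R_E/RWE annotations above.
 */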

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */

	/* bootstrapping code */
	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
		_text = .;
		*(.text.head)
	} :text = 0x9090
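
	/*
	 * The "= 0x9090" output section fill repeats the byte 0x90 (the x86
	 * NOP opcode), so padding inside the text mappings decodes as NOPs.
	 */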

	/* The rest of the text */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
		/* not really needed, already page aligned */
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	/* Exception table */
	. = ALIGN(16);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	} :text = 0x9090

	RODATA

	/* Data */
	. = ALIGN(PAGE_SIZE);
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;
		DATA_DATA
		CONSTRUCTORS

#ifdef CONFIG_X86_64
		/* End of data section */
		_edata = .;
#endif
	} :data

#ifdef CONFIG_X86_32
	/* 32 bit has nosave before _edata */
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	}
#endif

	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
		*(.data.idt)
	}

#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(PAGE_SIZE);
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
	.data.cacheline_aligned :
		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}

	/* rarely changed data like cpu maps */
#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
#endif
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)

#ifdef CONFIG_X86_32
		/* End of data section */
		_edata = .;
#endif
	}

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
                            SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
                            SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
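
/*
 * VSYSCALL_PHYS_ADDR/VSYSCALL_VIRT_ADDR round the end of .data.read_mostly
 * up to the next 4 KiB boundary, so for a section x in the vsyscall area
 * VLOAD(x) = ADDR(x) - VSYSCALL_ADDR + VSYSCALL_PHYS_ADDR gives its load
 * address and VVIRT(x) the matching kernel virtual address, even though
 * the area itself is linked at the fixed address VSYSCALL_ADDR (-10 MB).
 */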

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
		*(.vsyscall_0)
	} :user

	__vsyscall_0 = VSYSCALL_VIRT_ADDR;

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);


	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* init_task */
	. = ALIGN(THREAD_SIZE);
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
		*(.data.init_task)
	}
#ifdef CONFIG_X86_64
	 :data.init
#endif

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	. = ALIGN(16);
	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}
	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	SECURITY_INIT

	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * .exit.text is discarded at run time, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .data_nosave - must go
	 * into a separate segment, data.init2.  Also, the pda should be at
	 * the head of the percpu area: preallocate it and define the percpu
	 * offset symbol so that it can be accessed as a percpu variable.
	 */
	. = ALIGN(PAGE_SIZE);
	PERCPU_VADDR(0, :percpu)
#else
	PERCPU(PAGE_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	} :data.init2
	/* use another section data.init2, see PERCPU_VADDR() above */
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}

	/* Sections to be discarded */
	/DISCARD/ : {
		*(.exitcall.exit)
		*(.eh_frame)
		*(.discard)
	}

        STABS_DEBUG
        DWARF_DEBUG
}


#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
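
/*
 * For example, INIT_PER_CPU(gdt_page) expands to
 *	init_per_cpu__gdt_page = per_cpu__gdt_page + __per_cpu_load;
 * giving the boot processor the variable's address within the initial
 * per-cpu load area.
 */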

/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
        "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
       "kexec control code size is too big")
#endif
