/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol value must
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */
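
/*
 * A sketch of the distinction (using the section layout below): a
 * section-relative symbol is defined inside an output section and is
 * therefore relocated together with the image, e.g.
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		_sdata = .;		moves with the kernel image
 *	}
 *
 * while a top-level assignment to a bare constant, e.g. "foo = 0x1000;",
 * yields an absolute symbol whose value survives relocation unchanged.
 */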

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
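
/*
 * LOAD_OFFSET is the fixed delta between the kernel's link-time virtual
 * addresses and the physical addresses it is loaded at.  The recurring
 * idiom below,
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) { ... }
 *
 * links a section at its high virtual address while AT() gives the ELF
 * program headers the low physical load address.
 */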

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
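
/*
 * jiffies and jiffies_64 name the same counter: on 32-bit, jiffies
 * aliases the low 32 bits of jiffies_64 (x86 is little-endian); on
 * 64-bit, jiffies is placed in the user-visible .jiffies vsyscall
 * section defined further down and jiffies_64 is aliased to it.
 */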

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(5);          /* R_E */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
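
/*
 * FLAGS() takes the standard ELF p_flags bits: PF_X = 1, PF_W = 2,
 * PF_R = 4.  Hence FLAGS(5) = R_E, FLAGS(6) = RW_ and FLAGS(7) = RWE,
 * matching the comments above.
 */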

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
#ifdef CONFIG_X86_32
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090
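
	/*
	 * "= 0x9090" is the fill pattern for gaps in the text segment:
	 * 0x90 is the x86 NOP opcode, so any padding the linker inserts
	 * between input sections decodes as harmless no-ops.
	 */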

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	RO_DATA(PAGE_SIZE)

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32-bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
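
/*
 * Checking the arithmetic for the first vsyscall section, where
 * ADDR(.vsyscall_0) == VSYSCALL_ADDR:
 *
 *	VLOAD(.vsyscall_0) == VSYSCALL_ADDR - VLOAD_OFFSET
 *	                   == __vsyscall_0 - LOAD_OFFSET
 *
 * i.e. the vsyscall sections are loaded physically contiguous with the
 * rest of the image at __vsyscall_0, while VVIRT() maps an address in
 * the vsyscall page back to its kernel-image alias near __vsyscall_0.
 */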

	. = ALIGN(4096);
	__vsyscall_0 = .;

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
		*(.vsyscall_0)
	} :user

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);

	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = __vsyscall_0 + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(0, :percpu)
#endif
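
	/*
	 * Zero-based means .data.percpu is linked at virtual address 0,
	 * so a per-cpu symbol's link-time "address" is really an offset
	 * to be added to a CPU's per-cpu base at run time.  The initial
	 * copy of the data itself sits at __per_cpu_load, which
	 * PERCPU_VADDR() defines and the INIT_PER_CPU() aliases at the
	 * bottom of this file rely on.
	 */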

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * .exit.text is discarded at run time, not at link time, to cope
	 * with references to it from .altinstructions and .eh_frame.
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(PAGE_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}
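
	/*
	 * .smp_locks is a table of addresses of LOCK prefixes in the
	 * kernel text; when only one CPU is present, the alternatives
	 * code uses it to patch those prefixes away.  Page-aligning both
	 * ends lets the table be freed once it is no longer needed.
	 */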

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
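
	/*
	 * .brk is a zeroed region after .bss from which very early boot
	 * code can carve allocations (e.g. initial page tables) via the
	 * RESERVE_BRK()/extend_brk() machinery; __brk_limit caps how far
	 * that early allocator may grow.
	 */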

	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}

#ifdef CONFIG_X86_32
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
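
/*
 * Each line above expands to a plain link-time alias, e.g.:
 *
 *	init_per_cpu__gdt_page = per_cpu__gdt_page + __per_cpu_load;
 *
 * which gives early boot code an absolute address for the boot CPU's
 * copy of a zero-based per-cpu variable.
 */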

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((per_cpu__irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif