/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif

#define PROC_INFO							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
	*(.proc.info.init)						\
	VMLINUX_SYMBOL(__proc_info_end) = .;

#define HYPERVISOR_TEXT							\
	VMLINUX_SYMBOL(__hyp_text_start) = .;				\
	*(.hyp.text)							\
	VMLINUX_SYMBOL(__hyp_text_end) = .;

#define IDMAP_TEXT							\
	ALIGN_FUNCTION();						\
	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
	*(.idmap.text)							\
	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
	*(.hyp.idmap.text)						\
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

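/*
 * With CPU hotplug enabled the proc_info records and the related
 * unwind tables must stay resident, so that CPUs brought back online
 * after init memory has been freed can still be set up; otherwise
 * they can live in init memory or be discarded outright.
 */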
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

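/*
 * Exit text and data are normally discarded at link time, but the
 * SMP_ON_UP fixup table and the generic bug table may carry references
 * into them; keep them in those configurations to avoid dangling
 * references.
 */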
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

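/*
 * jiffies aliases the least significant 32 bits of jiffies_64; on a
 * big-endian kernel those live at byte offset 4.
 */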
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how input sections are
	 * assigned to output sections when multiple statements match
	 * the same input section name; there is no documented order
	 * of matching.
	 *
	 * The unwind exit sections must therefore be discarded before
	 * the rest of the unwind sections are included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

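	/*
	 * The kernel is linked to run at PAGE_OFFSET + TEXT_OFFSET; an
	 * XIP kernel instead executes its text in place from the flash
	 * mapping at XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR).
	 */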
#ifdef CONFIG_XIP_KERNEL
	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
	. = PAGE_OFFSET + TEXT_OFFSET;
#endif
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

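	/*
	 * With CONFIG_ARM_KERNMEM_PERMS the major kernel regions are
	 * aligned to a section boundary (1 MiB, or 2 MiB with LPAE) so
	 * that the page tables can give them distinct permissions at
	 * section granularity.
	 */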
#ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IDMAP_TEXT
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
			IRQENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			LOCK_TEXT
			HYPERVISOR_TEXT
			KPROBES_TEXT
			*(.gnu.warning)
			*(.glue_7)
			*(.glue_7t)
		. = ALIGN(4);
		*(.got)			/* Global offset table		*/
			ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	RO_DATA(PAGE_SIZE)

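	/*
	 * Each exception table entry pairs a potentially faulting
	 * user-access instruction with its fixup; without an MMU such
	 * faults cannot happen, so the entries are discarded above.
	 */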
	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .;			/* End of text and rodata section */

#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
# else
	. = ALIGN(PAGE_SIZE);
# endif
	__init_begin = .;
#endif
	/*
	 * The vectors and stubs are relocatable code; the only
	 * thing that matters is their relative offsets.
	 */
	__vectors_start = .;
	.vectors 0 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;
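
	/*
	 * At boot the vectors are copied into the vector page (normally
	 * the high-vectors page at 0xffff0000) and the stubs into the
	 * page above it, which is why .stubs is given a VMA of 0x1000
	 * relative to the vectors.
	 */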

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
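	/*
	 * An SMP kernel booted on a uniprocessor machine patches the
	 * SMP-only instructions recorded in .alt.smp.init with their UP
	 * equivalents; the table is discarded entirely above when
	 * SMP_ON_UP is not configured.
	 */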
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
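	/*
	 * The pv_table records the instructions that get patched at boot
	 * with the runtime physical-to-virtual offset
	 * (CONFIG_ARM_PATCH_PHYS_VIRT).
	 */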
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
#ifndef CONFIG_XIP_KERNEL
		INIT_DATA
#endif
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
#ifndef CONFIG_XIP_KERNEL
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}
#endif

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
	__data_loc = ALIGN(4);		/* location in binary */
	. = PAGE_OFFSET + TEXT_OFFSET;
#else
#ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;
#endif

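	/*
	 * For an XIP kernel the writable data is stored in flash at
	 * __data_loc and copied to RAM at PAGE_OFFSET + TEXT_OFFSET
	 * early in boot; _edata_loc below marks the end of that stored
	 * image.
	 */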
	.data : AT(__data_loc) {
		_data = .;		/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
		. = ALIGN(PAGE_SIZE);
		__init_begin = .;
		INIT_DATA
		ARM_EXIT_KEEP(EXIT_DATA)
		. = ALIGN(PAGE_SIZE);
		__init_end = .;
#endif

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so that we can
	 * free it after init has commenced and the TCM contents
	 * have been copied to their destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM: set the VMA to the TCM address
	 * and the LMA to common RAM.  The contents are then uploaded
	 * from RAM to the TCM and the RAM copy is freed afterwards.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer; this is needed to create the
	 * relative __dtcm_start below (used as an extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

/*
 * These must never be empty.  If you have to comment these two
 * assert statements out, your binutils is too old (for other
 * reasons as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long and must not
 * cross a page boundary.  The comment above about binutils applies
 * here as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")
360