xref: /linux/arch/arm/kernel/vmlinux.lds.S (revision b85d45947951d23cb22d90caecf4c1eb81342c96)
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif

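/*
 * The two macros below bracket an input section with begin/end linker
 * symbols so that other code can treat the contents as a table, along
 * the lines of (illustrative sketch only):
 *
 *	extern char __proc_info_begin[], __proc_info_end[];
 *	size_t table_bytes = __proc_info_end - __proc_info_begin;
 *
 * .proc.info.init collects the per-CPU proc_info records that the boot
 * code searches for the running processor; the idmap sections collect
 * code that must also be identity mapped (virtual == physical) while
 * the MMU is being enabled, plus the HYP-mode (KVM) init code.
 */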
#define PROC_INFO							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
	*(.proc.info.init)						\
	VMLINUX_SYMBOL(__proc_info_end) = .;

#define IDMAP_TEXT							\
	ALIGN_FUNCTION();						\
	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
	*(.idmap.text)							\
	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
	*(.hyp.idmap.text)						\
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

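/*
 * With CPU hotplug the proc_info records have to stay in the kernel
 * image for the lifetime of the system (a CPU may be brought up long
 * after init memory has been freed), so the ARM_CPU_KEEP(PROC_INFO)
 * use in .text below retains the table there.  Without hotplug the
 * table is only needed at boot and goes into the discardable
 * .init.proc.info section instead.
 */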
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

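/*
 * Exit code is normally dropped at link time, since built-in code can
 * never be unloaded.  The SMP_ON_UP fix-up records and the GENERIC_BUG
 * table may carry references into exit sections, however, so in those
 * configurations EXIT_TEXT/EXIT_DATA are kept (they still land in the
 * init region and are freed at runtime instead).
 */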
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

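/*
 * jiffies is an unsigned long (32 bits here) aliased onto the least
 * significant word of the 64-bit jiffies_64 counter: offset 0 on a
 * little-endian kernel, offset 4 on a big-endian (ARMEB) one.
 */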
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}
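
	/*
	 * On the conditional discards above: without an MMU the
	 * user-access fault fix-up machinery is presumably unused, so
	 * .text.fixup and the exception table can be dropped outright,
	 * and without SMP_ON_UP nothing ever consumes .alt.smp.init.
	 */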

#ifdef CONFIG_XIP_KERNEL
	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
	. = PAGE_OFFSET + TEXT_OFFSET;
#endif
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

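	/*
	 * CONFIG_ARM_KERNMEM_PERMS applies separate permissions to the
	 * kernel's text, rodata and data mappings, which are created with
	 * section-sized entries; the 1<<SECTION_SHIFT alignment here and
	 * at the later region boundaries keeps each region in its own
	 * section(s) so the differing permissions can actually be applied.
	 */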
#ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IDMAP_TEXT
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
			IRQENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			*(.gnu.warning)
			*(.glue_7)	/* glue arm to thumb code */
			*(.glue_7t)	/* glue thumb to arm code */
		. = ALIGN(4);
		*(.got)			/* Global offset table		*/
			ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif
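	/*
	 * .ARM.exidx/.ARM.extab are the ARM EABI unwind index and tables
	 * emitted by the compiler; the bracketing symbols are what the
	 * in-kernel unwinder (arch/arm/kernel/unwind.c) walks to produce
	 * backtraces when CONFIG_ARM_UNWIND is used instead of frame
	 * pointers.
	 */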

	NOTES

	_etext = .;			/* End of text and rodata section */

#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
# else
	. = ALIGN(PAGE_SIZE);
# endif
	__init_begin = .;
#endif
	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;
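
	/*
	 * .vectors and .stubs are linked at dummy VMAs of 0 and 0x1000,
	 * while their load addresses (the AT() expressions) follow the
	 * normal image layout; the boot code is expected to copy both
	 * blocks into the real vector page (see early_trap_init()), which
	 * is why only their relative offsets matter, as noted above.
	 */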

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
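	/*
	 * .alt.smp.init collects the fix-up records used to patch
	 * SMP-only instructions into their UP equivalents when an
	 * SMP_ON_UP kernel finds itself booting on a uniprocessor
	 * machine.
	 */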
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
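	/*
	 * .pv_table lists the instructions rewritten by the
	 * CONFIG_ARM_PATCH_PHYS_VIRT code once the real PHYS_OFFSET is
	 * known at boot.
	 */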
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
#ifndef CONFIG_XIP_KERNEL
		INIT_DATA
#endif
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
#ifndef CONFIG_XIP_KERNEL
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}
#endif

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
	__data_loc = ALIGN(4);		/* location in binary */
	. = PAGE_OFFSET + TEXT_OFFSET;
#else
#ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;
#endif
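
	/*
	 * For XIP kernels the .data payload stays in ROM at __data_loc
	 * while it is used from RAM at PAGE_OFFSET + TEXT_OFFSET; the
	 * early boot code is assumed to copy __data_loc.._edata_loc into
	 * place before any C code runs.  For regular kernels the load and
	 * run addresses coincide and no copy is needed.
	 */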

	.data : AT(__data_loc) {
		_data = .;		/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to a THREAD_SIZE boundary (8192 bytes by default).
		 */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
		. = ALIGN(PAGE_SIZE);
		__init_begin = .;
		INIT_DATA
		ARM_EXIT_KEEP(EXIT_DATA)
		. = ALIGN(PAGE_SIZE);
		__init_end = .;
#endif

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and the TCM contents have
	 * been copied to their destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM.
	 * Put the VMA at the TCM address and the LMA in common RAM;
	 * we'll upload the contents from RAM to TCM and free
	 * the used RAM after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer; this is needed to create the
	 * relative __dtcm_start below (to be used as extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

/*
 * These tables must never be empty.  If you have to comment these two
 * assert statements out, your binutils is too old (for other reasons
 * as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")
354