/* xref: /linux/arch/parisc/kernel/vmlinux.lds.S (revision 606d099cdd1080bbb50ea50dc52d98252f8f10a1) */
/*    Kernel link layout for various "sections"
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
 *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 *    Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
 *    Copyright (C) 2006 Helge Deller <deller@gmx.de>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif

ENTRY(_stext)
#ifndef CONFIG_64BIT
/* 32-bit: alias jiffies to the low word of jiffies_64, which sits at
 * offset 4 on a big-endian target like PA-RISC. */
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
SECTIONS
{

  . = KERNEL_BINARY_TEXT_START;

  _text = .;			/* Text and read-only data */
  .text ALIGN(16) : {
	*(.text)
	SCHED_TEXT
	LOCK_TEXT
	*(.text.do_softirq)
	*(.text.sys_exit)
	*(.text.do_sigaltstack)
	*(.text.do_fork)
	*(.text.*)
	*(.fixup)
	*(.lock.text)		/* out-of-line lock text */
	*(.gnu.warning)
	} = 0			/* fill gaps in .text with zero bytes */

  _etext = .;			/* End of text section */

  RODATA

  /* writeable */
  . = ALIGN(ASM_PAGE_SIZE);	/* Make sure this is page aligned so
  				   that we can properly leave these
				   as writable */
  data_start = .;

  . = ALIGN(16);		/* Exception table */
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;

  __start___unwind = .;         /* unwind info */
  .PARISC.unwind : { *(.PARISC.unwind) }
  __stop___unwind = .;

  /* rarely changed data like cpu maps */
  . = ALIGN(16);
  .data.read_mostly : { *(.data.read_mostly) }

  . = ALIGN(L1_CACHE_BYTES);
  .data : {			/* Data */
	*(.data)
	CONSTRUCTORS
	}

  . = ALIGN(L1_CACHE_BYTES);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  /* PA-RISC locks require 16-byte alignment */
  . = ALIGN(16);
  .data.lock_aligned : { *(.data.lock_aligned) }

  . = ALIGN(ASM_PAGE_SIZE);
  /* nosave data is really only used for software suspend...it's here
   * just in case we ever implement it */
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(ASM_PAGE_SIZE);
  __nosave_end = .;

  _edata = .;			/* End of data section */

  __bss_start = .;		/* BSS */
  /* page table entries need to be PAGE_SIZE aligned */
  . = ALIGN(ASM_PAGE_SIZE);
  .data.vmpages : {
	*(.data.vm0.pmd)
	*(.data.vm0.pgd)
	*(.data.vm0.pte)
	}
  .bss : { *(.bss) *(COMMON) }
  __bss_stop = .;


  /* assembler code expects init_task to be 16k aligned */
  . = ALIGN(16384); 		/* init_task */
  .data.init_task : { *(.data.init_task) }

  /* The interrupt stack is currently partially coded, but not yet
   * implemented */
  . = ALIGN(16384);
  init_istack : { *(init_istack) }

#ifdef CONFIG_64BIT
  . = ALIGN(16);               /* Linkage tables */
  .opd : { *(.opd) } PROVIDE (__gp = .);
  .plt : { *(.plt) }
  .dlt : { *(.dlt) }
#endif

  /* reserve space for interrupt stack by aligning __init* to 16k */
  . = ALIGN(16384);
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  .init.data : { *(.init.data) }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
	INITCALLS
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  /* alternate instruction replacement.  This is a mechanism x86 uses
   * to detect the CPU type and replace generic instruction sequences
   * with CPU specific ones.  We don't currently do this in PA, but
   * it seems like a good idea... */
  . = ALIGN(4);
  __alt_instructions = .;
  .altinstructions : { *(.altinstructions) }
  __alt_instructions_end = .;
  .altinstr_replacement : { *(.altinstr_replacement) }
  /* .exit.text is discarded at run time, not link time, to deal with
     references from .altinstructions and .eh_frame */
  .exit.text : { *(.exit.text) }
  .exit.data : { *(.exit.data) }
  . = ALIGN(ASM_PAGE_SIZE);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
  . = ALIGN(32);
  __per_cpu_start = .;
  .data.percpu  : { *(.data.percpu) }
  __per_cpu_end = .;
  . = ALIGN(ASM_PAGE_SIZE);
  __init_end = .;
  /* freed after init ends here */

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
	*(.exitcall.exit)
#ifdef CONFIG_64BIT
	/* temporary hack until binutils is fixed to not emit these
	 for static binaries */
	*(.interp)
	*(.dynsym)
	*(.dynstr)
	*(.dynamic)
	*(.hash)
	*(.gnu.hash)
#endif
	}

  STABS_DEBUG
  .note 0 : { *(.note) }

}
209