/*    Kernel link layout for various "sections"
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
 *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 *    Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
 *    Copyright (C) 2006 Helge Deller <deller@gmx.de>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/config.h>
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif

ENTRY(_stext)
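/* jiffies is an alias for the low 32 bits of jiffies_64; PA-RISC is
 * big-endian, so on a 32-bit kernel that word sits 4 bytes into the
 * 64-bit counter */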
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
SECTIONS
{

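  /* KERNEL_BINARY_TEXT_START is defined by the parisc headers included
   * above; it fixes the link-time address at which the kernel image
   * begins */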
  . = KERNEL_BINARY_TEXT_START;

  _text = .;			/* Text and read-only data */
  .text ALIGN(16) : {
	*(.text)
	SCHED_TEXT
	LOCK_TEXT
	*(.text.do_softirq)
	*(.text.sys_exit)
	*(.text.do_sigaltstack)
	*(.text.do_fork)
	*(.text.*)
	*(.fixup)
	*(.lock.text)		/* out-of-line lock text */
	*(.gnu.warning)
	} = 0
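  /* the "= 0" above is the output-section fill pattern: any alignment
   * gaps inside .text are padded with zero bytes */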

  _etext = .;			/* End of text section */

  RODATA

  /* writeable */
  . = ALIGN(ASM_PAGE_SIZE);	/* Make sure this is page aligned so
				   that we can properly leave these
				   as writable */
  data_start = .;

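  /* each __ex_table entry pairs a potentially faulting kernel address
   * with its fixup handler; the fault path looks entries up via
   * search_exception_tables() */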
  . = ALIGN(16);		/* Exception table */
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;

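  /* .PARISC.unwind collects the per-function unwind records walked by
   * the kernel stack unwinder (arch/parisc/kernel/unwind.c) */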
  __start___unwind = .;         /* unwind info */
  .PARISC.unwind : { *(.PARISC.unwind) }
  __stop___unwind = .;

  /* rarely changed data like cpu maps */
  . = ALIGN(16);
  .data.read_mostly : { *(.data.read_mostly) }

  . = ALIGN(L1_CACHE_BYTES);
  .data : {			/* Data */
	*(.data)
	CONSTRUCTORS
	}

  . = ALIGN(L1_CACHE_BYTES);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  /* PA-RISC locks require 16-byte alignment */
  . = ALIGN(16);
  .data.lock_aligned : { *(.data.lock_aligned) }

  . = ALIGN(ASM_PAGE_SIZE);
  /* nosave data is really only used for software suspend... it's here
   * just in case we ever implement it */
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(ASM_PAGE_SIZE);
  __nosave_end = .;

  _edata = .;			/* End of data section */

  __bss_start = .;		/* BSS */
  /* page table entries need to be PAGE_SIZE aligned */
  . = ALIGN(ASM_PAGE_SIZE);
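  /* the statically allocated initial page tables live here; the arch
   * code places objects such as swapper_pg_dir in these sections */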
  .data.vmpages : {
	*(.data.vm0.pmd)
	*(.data.vm0.pgd)
	*(.data.vm0.pte)
	}
  .bss : { *(.bss) *(COMMON) }
  __bss_stop = .;


  /* assembler code expects init_task to be 16k aligned */
  . = ALIGN(16384);		/* init_task */
  .data.init_task : { *(.data.init_task) }

  /* The interrupt stack is currently partially coded, but not yet
   * implemented */
  . = ALIGN(16384);
  init_istack : { *(init_istack) }

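  /* 64-bit (wide-mode) code calls through function descriptors: .opd
   * holds the official procedure descriptors, __gp is the global data
   * pointer they use, and .plt/.dlt are the procedure and data linkage
   * tables */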
#ifdef CONFIG_64BIT
  . = ALIGN(16);               /* Linkage tables */
  .opd : { *(.opd) } PROVIDE (__gp = .);
  .plt : { *(.plt) }
  .dlt : { *(.dlt) }
#endif

  /* reserve space for interrupt stack by aligning __init* to 16k */
  . = ALIGN(16384);
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  .init.data : { *(.init.data) }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
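  /* tables of initcall function pointers, grouped by level; at boot
   * do_initcalls() runs them in ascending order (1 = core_initcall
   * ... 7 = late_initcall) */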
  __initcall_start = .;
  .initcall.init : {
	*(.initcall1.init)
	*(.initcall2.init)
	*(.initcall3.init)
	*(.initcall4.init)
	*(.initcall5.init)
	*(.initcall6.init)
	*(.initcall7.init)
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  /* alternate instruction replacement.  This is a mechanism x86 uses
   * to detect the CPU type and replace generic instruction sequences
   * with CPU-specific ones.  We don't currently do this on PA-RISC,
   * but it seems like a good idea... */
  . = ALIGN(4);
  __alt_instructions = .;
  .altinstructions : { *(.altinstructions) }
  __alt_instructions_end = .;
  .altinstr_replacement : { *(.altinstr_replacement) }
  /* .exit.text is discarded at runtime, not link time, to deal with
     references from .altinstructions and .eh_frame */
  .exit.text : { *(.exit.text) }
  .exit.data : { *(.exit.data) }
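  /* an initramfs image built into the kernel is linked in here and
   * unpacked early in boot; the memory is freed along with the rest of
   * the init sections */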
  . = ALIGN(ASM_PAGE_SIZE);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
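  /* .data.percpu is the prototype per-CPU data area; each CPU gets its
   * own copy of it when the per-CPU areas are set up at boot */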
  . = ALIGN(32);
  __per_cpu_start = .;
  .data.percpu  : { *(.data.percpu) }
  __per_cpu_end = .;
  . = ALIGN(ASM_PAGE_SIZE);
  __init_end = .;
  /* freed after init ends here */

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
	*(.exitcall.exit)
#ifdef CONFIG_64BIT
	/* temporary hack until binutils is fixed to not emit these
	 * for static binaries */
	*(.interp)
	*(.dynsym)
	*(.dynstr)
	*(.dynamic)
	*(.hash)
#endif
	}

  STABS_DEBUG
  .note 0 : { *(.note) }

}