/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 *	Copyright (c) 1988 AT&T
 *	  All Rights Reserved
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

#define	ELF_TARGET_AMD64

#include	<stdio.h>
#include	<memory.h>
#include	<debug.h>
#include	"msg.h"
#include	"_libld.h"


/*
 * Types of segment index.
 */
typedef enum {
	LD_PHDR,
	LD_INTERP,
	LD_SUNWCAP,
	LD_TEXT,
	LD_DATA,
	LD_BSS,
#if	defined(_ELF64)
	LD_LRODATA,		/* (amd64-only) */
	LD_LDATA,		/* (amd64-only) */
#endif
	LD_DYN,
	LD_DTRACE,
	LD_SUNWBSS,
	LD_TLS,
#if	defined(_ELF64)
	LD_UNWIND,		/* (amd64-only) */
#endif
	LD_NOTE,
	LD_EXTRA,
	LD_NUM
} Segment_ndx;

/*
 * The loader uses a `segment descriptor' list to describe the output
 * segments it can potentially create. This list is initially seeded
 * using the templates contained in the sg_desc[] array below. Additional
 * segments may be added using a map file.
 *
 * The entries in sg_desc[] must be put in the order defined by the
 * Segment_ndx enum, such that a given LD_XXX value can serve as
 * an index into sg_desc[] for the corresponding descriptor.
 *
 * The entries in sg_desc[] are initialized using the SG_DESC_INIT macro
 * for two reasons:
 *
 *	1) The first field of the Sg_desc struct is a program header
 *		entry. ELF32_Phdr and ELF64_Phdr have the same fields,
 *		but their order is different. Use of a macro allows us
 *		to handle this transparently.
 *	2) Most of the fields in the Sg_desc entries are set to 0.
 *		Use of a macro allows us to hide the clutter.
 */
#ifdef _ELF64
#define	SG_DESC_INIT(p_type, p_flags, sg_name, sg_flags) \
	{ { p_type, p_flags, 0, 0, 0, 0, 0, 0}, \
	    sg_name, 0, 0, NULL, NULL, sg_flags, NULL, 0, 0}
#else
#define	SG_DESC_INIT(p_type, p_flags, sg_name, sg_flags) \
	{ { p_type, 0, 0, 0, 0, 0, p_flags, 0}, \
	    sg_name, 0, 0, NULL, NULL, sg_flags, NULL, 0, 0}
#endif
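
/*
 * For illustration only (not part of the build): given the macros above,
 * the LD_TEXT template below expands roughly as follows.  The only
 * difference between the two shapes is where p_flags lands, since
 * ELF32_Phdr and ELF64_Phdr order their members differently.
 *
 * ELF64:
 *	{ { PT_LOAD, PF_R + PF_X, 0, 0, 0, 0, 0, 0 },
 *	    MSG_ORIG(MSG_ENT_TEXT), 0, 0, NULL, NULL,
 *	    (FLG_SG_TYPE | FLG_SG_FLAGS), NULL, 0, 0 }
 *
 * ELF32:
 *	{ { PT_LOAD, 0, 0, 0, 0, 0, PF_R + PF_X, 0 },
 *	    MSG_ORIG(MSG_ENT_TEXT), 0, 0, NULL, NULL,
 *	    (FLG_SG_TYPE | FLG_SG_FLAGS), NULL, 0, 0 }
 */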

static const Sg_desc sg_desc[LD_NUM] = {
	/* LD_PHDR */
	SG_DESC_INIT(PT_PHDR, PF_R + PF_X, MSG_ORIG(MSG_ENT_PHDR),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_INTERP */
	SG_DESC_INIT(PT_INTERP, PF_R, MSG_ORIG(MSG_ENT_INTERP),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_SUNWCAP */
	SG_DESC_INIT(PT_SUNWCAP, PF_R, MSG_ORIG(MSG_ENT_SUNWCAP),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_TEXT */
	SG_DESC_INIT(PT_LOAD, PF_R + PF_X, MSG_ORIG(MSG_ENT_TEXT),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_DATA */
	SG_DESC_INIT(PT_LOAD, 0, MSG_ORIG(MSG_ENT_DATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_BSS */
	SG_DESC_INIT(PT_LOAD, 0, MSG_ORIG(MSG_ENT_BSS),
	    (FLG_SG_TYPE | FLG_SG_FLAGS | FLG_SG_DISABLED)),

#if	defined(_ELF64)
	/* LD_LRODATA (amd64-only) */
	SG_DESC_INIT(PT_LOAD, PF_R, MSG_ORIG(MSG_ENT_LRODATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_LDATA (amd64-only) */
	SG_DESC_INIT(PT_LOAD, 0, MSG_ORIG(MSG_ENT_LDATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),
#endif

	/* LD_DYN */
	SG_DESC_INIT(PT_DYNAMIC, 0, MSG_ORIG(MSG_ENT_DYNAMIC),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_DTRACE */
	SG_DESC_INIT(PT_SUNWDTRACE, 0, MSG_ORIG(MSG_ENT_DTRACE),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* LD_SUNWBSS */
	SG_DESC_INIT(PT_SUNWBSS, 0, MSG_ORIG(MSG_ENT_SUNWBSS), FLG_SG_TYPE),

	/* LD_TLS */
	SG_DESC_INIT(PT_TLS, PF_R, MSG_ORIG(MSG_ENT_TLS),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

#if	defined(_ELF64)
	/* LD_UNWIND (amd64-only) */
	SG_DESC_INIT(PT_SUNW_UNWIND, PF_R, MSG_ORIG(MSG_ENT_UNWIND),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),
#endif

	/* LD_NOTE */
	SG_DESC_INIT(PT_NOTE, 0, MSG_ORIG(MSG_ENT_NOTE), FLG_SG_TYPE),

	/* LD_EXTRA */
	SG_DESC_INIT(PT_NULL, 0, MSG_ORIG(MSG_STR_EMPTY), FLG_SG_TYPE)
};


/*
 * The input processing of the loader involves matching the sections of its
 * input files to an `entrance descriptor definition'.  The entrance criteria
 * differ between static and dynamic linking, and may be modified further
 * using a map file.  Each entrance criterion is associated with a segment
 * descriptor, thus maintaining a mapping of input sections to output
 * segments.
 *
 * Note the trick used for the ec_segment field, which is supposed to
 * be a pointer to a segment descriptor. We initialize this with the
 * index of the descriptor, and then turn it into an actual pointer
 * at runtime, once memory has been allocated and the templates copied.
 */
static const Ent_desc	ent_desc[] = {
	{{NULL, NULL}, MSG_ORIG(MSG_SCN_SUNWBSS), NULL,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC + SHF_WRITE,
		(Sg_desc *)LD_SUNWBSS, 0, FALSE},

	{{NULL, NULL}, NULL, SHT_NOTE, 0, 0,
		(Sg_desc *)LD_NOTE, 0, FALSE},

#if	defined(_ELF64)		/* (amd64-only) */
	{{NULL, NULL}, MSG_ORIG(MSG_SCN_LRODATA), NULL,
		SHF_ALLOC + SHF_AMD64_LARGE, SHF_ALLOC + SHF_AMD64_LARGE,
		(Sg_desc *)LD_LRODATA, 0, FALSE},
#endif

	{{NULL, NULL}, NULL, NULL,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC,
		(Sg_desc *)LD_TEXT, 0, FALSE},

	{{NULL, NULL}, NULL, SHT_NOBITS,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC + SHF_WRITE,
		(Sg_desc *)LD_BSS, 0, FALSE},

#if	defined(_ELF64)		/* (amd64-only) */
	{{NULL, NULL}, NULL, SHT_NOBITS,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		(Sg_desc *)LD_DATA, 0, FALSE},

	{{NULL, NULL}, NULL, NULL,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		(Sg_desc *)LD_LDATA, 0, FALSE},
#endif

	{{NULL, NULL}, NULL, NULL,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC + SHF_WRITE,
		(Sg_desc *)LD_DATA, 0, FALSE},

	{{NULL, NULL}, NULL, 0, 0, 0,
		(Sg_desc *)LD_EXTRA, 0, FALSE}
};
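
/*
 * Illustrative note (restating the trick described above): the casts in
 * ent_desc[] store plain Segment_ndx values, so (Sg_desc *)LD_NOTE is
 * simply the integer LD_NOTE carried in a pointer-sized field.  Once
 * ld_ent_setup() below has copied sg_desc[] into writable memory (sgp),
 * each entry is rewritten into a real pointer:
 *
 *	enp->ec_segment = &sgp[(long)enp->ec_segment];
 */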

/*
 * Initialize new entrance and segment descriptors and add them as lists to
 * the output file descriptor.
 */
uintptr_t
ld_ent_setup(Ofl_desc *ofl, Xword segalign)
{
	Ent_desc	*enp;
	Sg_desc		*sgp;
	size_t		idx;

	/*
	 * Initialize the elf library.
	 */
	if (elf_version(EV_CURRENT) == EV_NONE) {
		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_ELF_LIBELF),
		    EV_CURRENT);
		return (S_ERROR);
	}

	/*
	 * Initialize internal Global Symbol Table AVL tree
	 */
	avl_create(&ofl->ofl_symavl, &ld_sym_avl_comp, sizeof (Sym_avlnode),
	    SGSOFFSETOF(Sym_avlnode, sav_node));

	/*
	 * Allocate and initialize writable copies of both the entrance and
	 * segment descriptors.
	 *
	 * Note that on non-amd64 targets, this allocates a few more
	 * elements than are needed. For now, we are willing to overallocate
	 * a small amount to simplify the code.
	 */
	if ((sgp = libld_malloc(sizeof (sg_desc))) == 0)
		return (S_ERROR);
	(void) memcpy(sgp, sg_desc, sizeof (sg_desc));
	if ((enp = libld_malloc(sizeof (ent_desc))) == 0)
		return (S_ERROR);
	(void) memcpy(enp, ent_desc, sizeof (ent_desc));

	/*
	 * The data segment permissions can differ:
	 *
	 *	- Architectural/ABI per-platform differences
	 *	- Whether the object is built statically or dynamically
	 *
	 * The segments so affected have their program header flags
	 * set here at runtime, rather than in the sg_desc[] templates above.
	 */
	sgp[LD_DATA].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_BSS].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DYN].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DTRACE].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
#if	defined(_ELF64)
	sgp[LD_LDATA].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DTRACE].sg_phdr.p_flags |= PF_X;
#endif
	if (ofl->ofl_flags & FLG_OF_DYNAMIC) {
		sgp[LD_SUNWBSS].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	} else {
		sgp[LD_DATA].sg_phdr.p_flags |= PF_X;
	}
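	/*
	 * Worked example (illustrative only; the actual value of
	 * ld_targ.t_m.m_dataseg_perm is target-specific): if the default
	 * data segment permission were (PF_R | PF_W), the segments assigned
	 * above (data, bss, dynamic, dtrace) would start out readable and
	 * writable.  A dynamic link additionally gives the SUNWBSS segment
	 * that same permission, while a static link instead marks the data
	 * segment executable as well (PF_R | PF_W | PF_X).
	 */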

	/*
	 * Traverse the new entrance descriptor list, converting the segment
	 * index entries into pointers to the corresponding descriptors in
	 * the new segment descriptor list.  Add each entrance descriptor to
	 * the output file list.
	 */
	for (idx = 0; idx < (sizeof (ent_desc) / sizeof (ent_desc[0]));
	    idx++, enp++) {
#if	defined(_ELF64)
		/* Don't use the amd64 entry conditions for non-amd64 targets */
		if ((enp->ec_attrmask & SHF_AMD64_LARGE) &&
		    (ld_targ.t_m.m_mach != EM_AMD64))
			continue;
#endif
		enp->ec_segment = &sgp[(long)enp->ec_segment];
		if ((list_appendc(&ofl->ofl_ents, enp)) == 0)
			return (S_ERROR);
	}

	/*
	 * Traverse the new segment descriptor list adding each entry to the
	 * segment descriptor list.  For each loadable segment initialize
	 * a default alignment (ld(1) and ld.so.1 initialize this differently).
	 */
	for (idx = 0; idx < LD_NUM; idx++, sgp++) {
		Phdr	*phdr = &(sgp->sg_phdr);

#if	defined(_ELF64)
		/* Ignore amd64 segment templates for non-amd64 targets */
		switch (idx) {
		case LD_LRODATA:
		case LD_LDATA:
		case LD_UNWIND:
			if ((ld_targ.t_m.m_mach != EM_AMD64))
				continue;
		}
#endif

		if ((list_appendc(&ofl->ofl_segs, sgp)) == 0)
			return (S_ERROR);
		if (phdr->p_type == PT_LOAD)
			phdr->p_align = segalign;
	}

	return (1);
}
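
/*
 * Minimal usage sketch (illustrative only; the real call site lives
 * elsewhere in libld, and `segalign' here is a hypothetical stand-in for
 * whatever default segment alignment the caller selects for the target):
 *
 *	if (ld_ent_setup(ofl, segalign) == S_ERROR)
 *		return (S_ERROR);
 */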