/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 *	Copyright (c) 1988 AT&T
 *	  All Rights Reserved
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define	ELF_TARGET_AMD64

#include	<stdio.h>
#include	<memory.h>
#include	<debug.h>
#include	"msg.h"
#include	"_libld.h"

/*
 * The loader uses a `segment descriptor' list to describe the output
 * segments it can potentially create. This list is initially seeded
 * using the templates contained in the sg_desc[] array below. Additional
 * segments may be added using a mapfile.
 *
 * The entries in sg_desc[] must be put in the order defined by the
 * Segment_id enum, such that a given LD_XXX value can serve as
 * an index into sg_desc[] for the corresponding descriptor.
 *
 * The entries in sg_desc[] are initialized using the SG_DESC_INIT macro
 * for two reasons:
 *
 *	1) The first field of the Sg_desc struct is a program header
 *		entry. Elf32_Phdr and Elf64_Phdr have the same fields,
 *		but their order is different. Use of a macro allows us
 *		to handle this transparently.
 *	2) Most of the fields in the Sg_desc entries are set to 0.
 *		Use of a macro allows us to hide the clutter.
 */
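
/*
 * For reference, the program header layouts that SG_DESC_INIT has to
 * reconcile (field order as declared by the ELF ABI in <sys/elf.h>; shown
 * here only as an illustration, not as part of this file's declarations):
 *
 *	Elf32_Phdr:	p_type, p_offset, p_vaddr, p_paddr,
 *			p_filesz, p_memsz, p_flags, p_align
 *	Elf64_Phdr:	p_type, p_flags, p_offset, p_vaddr,
 *			p_paddr, p_filesz, p_memsz, p_align
 *
 * Hence p_flags is the second initializer in the _ELF64 variant of the
 * macro below, and the seventh in the 32-bit variant.
 */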
#ifdef _ELF64
#define	SG_DESC_INIT(id, p_type, p_flags, sg_name, sg_flags) \
	{ id, { p_type, p_flags, 0, 0, 0, 0, 0, 0}, \
	    sg_name, 0, 0, NULL, NULL, sg_flags, NULL, 0, NULL}
#else
#define	SG_DESC_INIT(id, p_type, p_flags, sg_name, sg_flags) \
	{ id, { p_type, 0, 0, 0, 0, 0, p_flags, 0}, \
	    sg_name, 0, 0, NULL, NULL, sg_flags, NULL, 0, NULL}
#endif

static const Sg_desc sg_desc[LD_NUM] = {
	SG_DESC_INIT(LD_PHDR, PT_PHDR, PF_R + PF_X, MSG_ORIG(MSG_ENT_PHDR),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_INTERP, PT_INTERP, PF_R, MSG_ORIG(MSG_ENT_INTERP),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_SUNWCAP, PT_SUNWCAP, PF_R, MSG_ORIG(MSG_ENT_SUNWCAP),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_TEXT, PT_LOAD, PF_R + PF_X, MSG_ORIG(MSG_ENT_TEXT),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_DATA, PT_LOAD, 0, MSG_ORIG(MSG_ENT_DATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_BSS, PT_LOAD, 0, MSG_ORIG(MSG_ENT_BSS),
	    (FLG_SG_TYPE | FLG_SG_FLAGS | FLG_SG_DISABLED)),

#if	defined(_ELF64)
	/* (amd64-only) */
	SG_DESC_INIT(LD_LRODATA, PT_LOAD, PF_R, MSG_ORIG(MSG_ENT_LRODATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* (amd64-only) */
	SG_DESC_INIT(LD_LDATA, PT_LOAD, 0, MSG_ORIG(MSG_ENT_LDATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),
#endif
	SG_DESC_INIT(LD_DYN, PT_DYNAMIC, 0, MSG_ORIG(MSG_ENT_DYNAMIC),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_DTRACE, PT_SUNWDTRACE, 0, MSG_ORIG(MSG_ENT_DTRACE),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_TLS, PT_TLS, PF_R, MSG_ORIG(MSG_ENT_TLS),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_UNWIND, PT_SUNW_UNWIND, PF_R, MSG_ORIG(MSG_ENT_UNWIND),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_NOTE, PT_NOTE, 0, MSG_ORIG(MSG_ENT_NOTE), FLG_SG_TYPE),

	SG_DESC_INIT(LD_EXTRA, PT_NULL, 0, MSG_ORIG(MSG_ENT_EXTRA), FLG_SG_TYPE)
};

/*
 * The processing of input files by the link-editor involves matching each
 * file's sections to an `entrance descriptor definition'.  The entrance
 * criteria can be modified further using a mapfile.  Each entrance criterion
 * is associated with a segment descriptor, thus a mapping of input sections
 * to output segments is maintained.
 *
 * Note the trick used for the ec_segment field, which is supposed to be a
 * pointer to a segment descriptor.  We initialize this with the index of the
 * descriptor, and then turn it into an actual pointer at runtime, once memory
 * has been allocated and the templates copied.
 */
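
/*
 * For example, the LD_NOTE entry below records its segment as
 * (Sg_desc *)LD_NOTE, i.e. the integer index cast to a pointer value.
 * ld_ent_setup() later converts it into a real pointer into the writable
 * segment descriptor copy:
 *
 *	nenp->ec_segment = &sgp[(long)oenp->ec_segment];
 */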
static const Ent_desc	ent_desc[] = {
	{NULL, MSG_ORIG(MSG_ENT_NOTE), SHT_NOTE, 0, 0,
		(Sg_desc *)LD_NOTE, 0, FLG_EC_BUILTIN},

#if	defined(_ELF64)		/* (amd64-only) */
	{NULL, MSG_ORIG(MSG_ENT_LRODATA), NULL,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		SHF_ALLOC + SHF_AMD64_LARGE,
		(Sg_desc *)LD_LRODATA, 0, FLG_EC_BUILTIN},
#endif
	{NULL, MSG_ORIG(MSG_ENT_TEXT), NULL,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC,
		(Sg_desc *)LD_TEXT, 0, FLG_EC_BUILTIN},

	{NULL, MSG_ORIG(MSG_ENT_BSS), SHT_NOBITS,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC + SHF_WRITE,
		(Sg_desc *)LD_BSS, 0, FLG_EC_BUILTIN},

#if	defined(_ELF64)		/* (amd64-only) */
	{NULL, MSG_ORIG(MSG_ENT_LBSS), SHT_NOBITS,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		(Sg_desc *)LD_DATA, 0, FLG_EC_BUILTIN},

	{NULL, MSG_ORIG(MSG_ENT_LDATA), NULL,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		(Sg_desc *)LD_LDATA, 0, FLG_EC_BUILTIN},
#endif
	{NULL, MSG_ORIG(MSG_ENT_DATA), NULL,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC + SHF_WRITE,
		(Sg_desc *)LD_DATA, 0, FLG_EC_BUILTIN},

	{NULL, MSG_ORIG(MSG_ENT_EXTRA), 0, 0, 0,
		(Sg_desc *)LD_EXTRA, 0, FLG_EC_BUILTIN}
};

/*
 * Initialize new entrance and segment descriptors and add them as lists to
 * the output file descriptor.
 */
uintptr_t
ld_ent_setup(Ofl_desc *ofl, Xword segalign)
{
	const Ent_desc	*oenp;
	Sg_desc		*sgp;
	size_t		idx;

	/*
	 * Initialize the elf library.
	 */
	if (elf_version(EV_CURRENT) == EV_NONE) {
		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_ELF_LIBELF),
		    EV_CURRENT);
		return (S_ERROR);
	}

	/*
	 * Initialize internal Global Symbol Table AVL tree
	 */
	avl_create(&ofl->ofl_symavl, &ld_sym_avl_comp, sizeof (Sym_avlnode),
	    SGSOFFSETOF(Sym_avlnode, sav_node));

	/*
	 * Allocate and initialize writable copies of both the entrance and
	 * segment descriptors.
	 *
	 * Note that on non-amd64 targets, this allocates a few more
	 * elements than are needed. For now, we are willing to overallocate
	 * a small amount to simplify the code.
	 */
	if ((sgp = libld_malloc(sizeof (sg_desc))) == NULL)
		return (S_ERROR);
	(void) memcpy(sgp, sg_desc, sizeof (sg_desc));

	/*
	 * The data segment permissions can differ depending on:
	 *
	 *	- Architectural/ABI per-platform differences
	 *	- Whether the object is built statically or dynamically
	 *
	 * Those segments so affected have their program header flags
	 * set here at runtime, rather than in the sg_desc templates above.
	 */
	sgp[LD_DATA].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_BSS].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DYN].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DTRACE].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
#if	defined(_ELF64)
	sgp[LD_LDATA].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DTRACE].sg_phdr.p_flags |= PF_X;
#endif
	if ((ofl->ofl_flags & FLG_OF_DYNAMIC) == 0)
		sgp[LD_DATA].sg_phdr.p_flags |= PF_X;

	/*
	 * Traverse the new entrance descriptor list, converting each segment
	 * index into a pointer to the corresponding entry in the new segment
	 * descriptor list.  Add each entrance descriptor to the output file
	 * list.
	 */
	for (idx = 0, oenp = ent_desc;
	    idx < (sizeof (ent_desc) / sizeof (ent_desc[0])); idx++, oenp++) {
		Ent_desc	*nenp;

#if	defined(_ELF64)
		/* Don't use the amd64 entry conditions for non-amd64 targets */
		if ((oenp->ec_attrmask & SHF_AMD64_LARGE) &&
		    (ld_targ.t_m.m_mach != EM_AMD64))
			continue;
#endif
		if ((nenp = alist_append(&(ofl->ofl_ents), oenp,
		    sizeof (Ent_desc), AL_CNT_OFL_ENTRANCE)) == NULL)
			return (S_ERROR);

		nenp->ec_segment = &sgp[(long)oenp->ec_segment];
	}

	/*
	 * Traverse the new segment descriptor list, adding each entry to the
	 * output file's segment list.  For each loadable segment, establish
	 * a default alignment (ld(1) and ld.so.1 initialize this differently).
	 */
	for (idx = 0; idx < LD_NUM; idx++, sgp++) {
		Phdr	*phdr = &(sgp->sg_phdr);

#if	defined(_ELF64)
		/* Ignore amd64 segment templates for non-amd64 targets */
		switch (idx) {
		case LD_LRODATA:
		case LD_LDATA:
			if ((ld_targ.t_m.m_mach != EM_AMD64))
				continue;
		}
#endif
		if ((aplist_append(&ofl->ofl_segs, sgp,
		    AL_CNT_SEGMENTS)) == NULL)
			return (S_ERROR);
		if (phdr->p_type == PT_LOAD)
			phdr->p_align = segalign;
	}

	return (1);
}