/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define	ELF_TARGET_AMD64

#include	<stdio.h>
#include	<memory.h>
#include	<debug.h>
#include	"msg.h"
#include	"_libld.h"

/*
 * The link-editor uses a `segment descriptor' list to describe the output
 * segments it can potentially create. This list is initially seeded
 * using the templates contained in the sg_desc[] array below. Additional
 * segments may be added using a mapfile.
 *
 * The entries in sg_desc[] must be put in the order defined by the
 * Segment_id enum, such that a given LD_XXX value can serve as
 * an index into sg_desc[] for the corresponding descriptor.
 *
 * The entries in sg_desc[] are initialized using the SG_DESC_INIT macro
 * for two reasons:
 *
 *	1) The first field of the Sg_desc struct is a program header
 *	   entry. ELF32_Phdr and ELF64_Phdr have the same fields,
 *	   but their order is different. Use of a macro allows us
 *	   to handle this transparently.
 *	2) Most of the fields in the Sg_desc entries are set to 0.
 *	   Use of a macro allows us to hide the clutter.
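 *
 * For illustration, the _ELF64 variant of the macro places the p_flags
 * argument as the second program header member, while the ELF32 variant
 * places it seventh, matching the respective ELF64_Phdr and ELF32_Phdr
 * layouts.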
 */
#ifdef _ELF64
#define	SG_DESC_INIT(id, p_type, p_flags, sg_name, sg_flags) \
	{ id, { p_type, p_flags, 0, 0, 0, 0, 0, 0}, \
	    sg_name, 0, 0, NULL, NULL, sg_flags, NULL, 0, NULL}
#else
#define	SG_DESC_INIT(id, p_type, p_flags, sg_name, sg_flags) \
	{ id, { p_type, 0, 0, 0, 0, 0, p_flags, 0}, \
	    sg_name, 0, 0, NULL, NULL, sg_flags, NULL, 0, NULL}
#endif

static const Sg_desc sg_desc[LD_NUM] = {
	SG_DESC_INIT(LD_PHDR, PT_PHDR, PF_R + PF_X, MSG_ORIG(MSG_ENT_PHDR),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_INTERP, PT_INTERP, PF_R, MSG_ORIG(MSG_ENT_INTERP),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_SUNWCAP, PT_SUNWCAP, PF_R, MSG_ORIG(MSG_ENT_SUNWCAP),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_TEXT, PT_LOAD, PF_R + PF_X, MSG_ORIG(MSG_ENT_TEXT),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_DATA, PT_LOAD, 0, MSG_ORIG(MSG_ENT_DATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_BSS, PT_LOAD, 0, MSG_ORIG(MSG_ENT_BSS),
	    (FLG_SG_TYPE | FLG_SG_FLAGS | FLG_SG_DISABLED)),

#if defined(_ELF64)
	/* (amd64-only) */
	SG_DESC_INIT(LD_LRODATA, PT_LOAD, PF_R, MSG_ORIG(MSG_ENT_LRODATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	/* (amd64-only) */
	SG_DESC_INIT(LD_LDATA, PT_LOAD, 0, MSG_ORIG(MSG_ENT_LDATA),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),
#endif
	SG_DESC_INIT(LD_DYN, PT_DYNAMIC, 0, MSG_ORIG(MSG_ENT_DYNAMIC),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_DTRACE, PT_SUNWDTRACE, 0, MSG_ORIG(MSG_ENT_DTRACE),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_TLS, PT_TLS, PF_R, MSG_ORIG(MSG_ENT_TLS),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_UNWIND, PT_SUNW_UNWIND, PF_R, MSG_ORIG(MSG_ENT_UNWIND),
	    (FLG_SG_TYPE | FLG_SG_FLAGS)),

	SG_DESC_INIT(LD_NOTE, PT_NOTE, 0, MSG_ORIG(MSG_ENT_NOTE), FLG_SG_TYPE),

	SG_DESC_INIT(LD_EXTRA, PT_NULL, 0, MSG_ORIG(MSG_ENT_EXTRA), FLG_SG_TYPE)
};

/*
 * The processing of input files by the link-editor involves matching the
 * files' sections to an `entrance descriptor' definition. The entrance
 * criteria can be modified further using a mapfile. Each entrance criterion
 * is associated with a segment descriptor, thus a mapping of input sections
 * to output segments is maintained.
 *
 * Note the trick used for the ec_segment field, which is supposed to be a
 * pointer to a segment descriptor. We initialize this with the index of the
 * descriptor, and then turn it into an actual pointer at runtime, once memory
 * has been allocated and the templates copied.
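 *
 * For example, the (Sg_desc *)LD_NOTE value below is one such index;
 * ld_ent_setup() later converts it with &sgp[(long)oenp->ec_segment].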
 */
static const Ent_desc ent_desc[] = {
	{NULL, MSG_ORIG(MSG_ENT_NOTE), SHT_NOTE, 0, 0,
		(Sg_desc *)LD_NOTE, 0, FLG_EC_BUILTIN},

#if defined(_ELF64)		/* (amd64-only) */
	{NULL, MSG_ORIG(MSG_ENT_LRODATA), NULL,
		SHF_ALLOC + SHF_AMD64_LARGE, SHF_ALLOC + SHF_AMD64_LARGE,
		(Sg_desc *)LD_LRODATA, 0, FLG_EC_BUILTIN},
#endif
	{NULL, MSG_ORIG(MSG_ENT_TEXT), NULL,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC,
		(Sg_desc *)LD_TEXT, 0, FLG_EC_BUILTIN},

	{NULL, MSG_ORIG(MSG_ENT_BSS), SHT_NOBITS,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC + SHF_WRITE,
		(Sg_desc *)LD_BSS, 0, FLG_EC_BUILTIN},

#if defined(_ELF64)		/* (amd64-only) */
	{NULL, MSG_ORIG(MSG_ENT_LBSS), SHT_NOBITS,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		(Sg_desc *)LD_DATA, 0, FLG_EC_BUILTIN},

	{NULL, MSG_ORIG(MSG_ENT_LDATA), NULL,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		SHF_ALLOC + SHF_WRITE + SHF_AMD64_LARGE,
		(Sg_desc *)LD_LDATA, 0, FLG_EC_BUILTIN},
#endif
	{NULL, MSG_ORIG(MSG_ENT_DATA), NULL,
		SHF_ALLOC + SHF_WRITE, SHF_ALLOC + SHF_WRITE,
		(Sg_desc *)LD_DATA, 0, FLG_EC_BUILTIN},

	{NULL, MSG_ORIG(MSG_ENT_EXTRA), 0, 0, 0,
		(Sg_desc *)LD_EXTRA, 0, FLG_EC_BUILTIN}
};

/*
 * Initialize new entrance and segment descriptors and add them as lists to
 * the output file descriptor.
 */
uintptr_t
ld_ent_setup(Ofl_desc *ofl, Xword segalign)
{
	const Ent_desc	*oenp;
	Sg_desc		*sgp;
	size_t		idx;

	/*
	 * Initialize the elf library.
	 */
	if (elf_version(EV_CURRENT) == EV_NONE) {
		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_ELF_LIBELF),
		    EV_CURRENT);
		return (S_ERROR);
	}

	/*
	 * Initialize internal Global Symbol Table AVL tree
	 */
	avl_create(&ofl->ofl_symavl, &ld_sym_avl_comp, sizeof (Sym_avlnode),
	    SGSOFFSETOF(Sym_avlnode, sav_node));

	/*
	 * Allocate and initialize writable copies of both the entrance and
	 * segment descriptors.
	 *
	 * Note that on non-amd64 targets, this allocates a few more
	 * elements than are needed. For now, we are willing to overallocate
	 * a small amount to simplify the code.
	 */
	if ((sgp = libld_malloc(sizeof (sg_desc))) == NULL)
		return (S_ERROR);
	(void) memcpy(sgp, sg_desc, sizeof (sg_desc));

	/*
	 * The data segment permissions can differ:
	 *
	 *	- Architectural/ABI per-platform differences
	 *	- Whether the object is built statically or dynamically
	 *
	 * Those segments so affected have their program header flags
	 * set here at runtime, rather than in the sg_desc templates above.
	 */
	sgp[LD_DATA].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_BSS].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DYN].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DTRACE].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
#if defined(_ELF64)
	sgp[LD_LDATA].sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	sgp[LD_DTRACE].sg_phdr.p_flags |= PF_X;
#endif
	if ((ofl->ofl_flags & FLG_OF_DYNAMIC) == 0)
		sgp[LD_DATA].sg_phdr.p_flags |= PF_X;

	/*
	 * Traverse the new entrance descriptor list converting the segment
	 * pointer entries to the absolute address within the new segment
	 * descriptor list. Add each entrance descriptor to the output file
	 * list.
	 */
	for (idx = 0, oenp = ent_desc;
	    idx < (sizeof (ent_desc) / sizeof (ent_desc[0])); idx++, oenp++) {
		Ent_desc	*nenp;

#if defined(_ELF64)
		/* Don't use the amd64 entry conditions for non-amd64 targets */
		if ((oenp->ec_attrmask & SHF_AMD64_LARGE) &&
		    (ld_targ.t_m.m_mach != EM_AMD64))
			continue;
#endif
		if ((nenp = alist_append(&(ofl->ofl_ents), oenp,
		    sizeof (Ent_desc), AL_CNT_OFL_ENTRANCE)) == NULL)
			return (S_ERROR);

		nenp->ec_segment = &sgp[(long)oenp->ec_segment];
	}

	/*
	 * Traverse the new segment descriptor list adding each entry to the
	 * segment descriptor list. For each loadable segment initialize
	 * a default alignment (ld(1) and ld.so.1 initialize this differently).
	 */
	for (idx = 0; idx < LD_NUM; idx++, sgp++) {
		Phdr	*phdr = &(sgp->sg_phdr);

#if defined(_ELF64)
		/* Ignore amd64 segment templates for non-amd64 targets */
		switch (idx) {
		case LD_LRODATA:
		case LD_LDATA:
			if ((ld_targ.t_m.m_mach != EM_AMD64))
				continue;
		}
#endif
		if ((aplist_append(&ofl->ofl_segs, sgp,
		    AL_CNT_SEGMENTS)) == NULL)
			return (S_ERROR);
		if (phdr->p_type == PT_LOAD)
			phdr->p_align = segalign;
	}

	return (1);
}