/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_amd64.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"_inline_gen.h"
#include	"_inline_reloc.h"
#include	"msg.h"

extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */  0x55,			/* pushq %rbp */
/* 0x01 */  0x48, 0x89, 0xe5,		/* movq  %rsp, %rbp */
/* 0x04 */  0x48, 0x83, 0xec, 0x10,	/* subq  $0x10, %rsp */
/* 0x08 */  0x4c, 0x8d, 0x1d, 0x00,	/* leaq  trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */  0x4c, 0x89, 0x5d, 0xf8,	/* movq  %r11, -0x8(%rbp) */
/* 0x13 */  0x49, 0xbb, 0x00, 0x00,	/* movq  $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */  0x41, 0xff, 0xe3		/* jmp   *%r11 */
/* 0x20 */
};

/*
 * The virtual outstanding relocations against the above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

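/*
 * Byte offsets, within the template above, of the leaq displacement and
 * the movq immediate that the two relocations must patch.
 */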
#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15

int	dyn_plt_ent_size = ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN) +
		sizeof (uintptr_t) +	/* reflmp */
		sizeof (uintptr_t) +	/* deflmp */
		sizeof (uint_t) +	/* symndx */
		sizeof (uint_t) +	/* sb_flags */
		sizeof (Sym);		/* symdef */

/*
 * The dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn_data:
 *	.align  8
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't, do
	 * it now.  Otherwise this function has been called before, but from a
	 * different plt (i.e., from another shared object).  In that case
	 * we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
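		/*
		 * The displacement is computed relative to the end of the
		 * leaq instruction, since %rip-relative addressing is
		 * relative to the next instruction, and the displacement
		 * field is the final 4 bytes of the instruction; hence the
		 * additional -4 below.
		 */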
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}
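
		/*
		 * Fill in the dyn_data area that follows the glue code (see
		 * the layout above): the referencing and defining link-maps,
		 * then sb_flags and the symbol index packed into a single
		 * 64-bit word, and finally a copy of the symbol with its
		 * value set to the resolved destination.
		 */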
		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}

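	/*
	 * Point the got entry for this plt at the glue code, so that
	 * subsequent calls through the plt jump straight to it.
	 */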
	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; passes first through an assembly language
 * interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
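/*
 * For reference, the amd64 ABI gives a lazily bound plt entry the general
 * form (a sketch only; ld(1) emits the real entries):
 *
 *	jmp	*name@GOT(%rip)		# got entry for this function
 *	pushq	$pltndx			# relocation index
 *	jmp	.plt0			# plt0 enters ld.so.1 via the got
 *
 * Until a function is bound, its got entry points back at the pushq above,
 * so the first call falls through plt0 into elf_rtbndr(), which lands here.
 */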
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	Sresult		sr;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter(0);

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map, or
	 * the relocation offset is invalid, then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if ((!lmp) || (pltndx >=
	    (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
		Conv_inv_buf_t inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
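	/*
	 * Convert the plt index into a byte offset into the plt relocation
	 * table (JMPREL).
	 */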
	reloff = pltndx * (ulong_t)RELENT(lmp);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list; this will be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find a definition for the symbol.  Initialize the symbol lookup and
	 * symbol result data structures.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);
	SRESULT_INIT(sr, name);
	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	name = (char *)sr.sr_name;
	nlmp = sr.sr_dmap;
	nsym = sr.sr_sym;

	symval = nsym->st_value;

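	/*
	 * If the defining object isn't fixed to a specific address, the
	 * symbol value is relative; add the object's base address, unless
	 * the symbol is absolute.
	 */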
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

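	/*
	 * If symbol binding auditing is in effect, give the audit libraries
	 * a chance to inspect, and possibly redirect, this binding.
	 */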
	if ((lml->lm_tflags | AFLAGS(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | AFLAGS(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
	    nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml, 0);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}

/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0;
	ulong_t		dsymndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	Syminfo		*sip;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rela		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy binding, initialize the first few
	 * global offset table entries so that function calls go to
	 * elf_rtbndr().  dbx(1) seems to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		mmapobj_result_t	*mpp;

		/*
		 * Make sure the segment is writable.
		 */
		if ((((mpp =
		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);

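	/*
	 * Note the number of plt relocations; this is used below to batch
	 * process the lazy got entries of a shared object.
	 */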
	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to
	 * RTLD_NOW, then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real .plt
	 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;
		Sresult	sr;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		/*
		 * Initialize the symbol lookup and symbol result data
		 * structures.
		 */
		SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
		    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
		SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));

		if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
			return (1);

		symdef = sr.sr_sym;
		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and end
		 * of this table.  There are two different interpretations of
		 * the ABI at this point:
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL and
		 *	PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode, there is no
	 * need to scan the .rel.plt table; however, if we're processing a
	 * shared object in lazy mode, the .got addresses associated with each
	 * .plt must be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

	sip = SYMINFO(lmp);
	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		mmapobj_result_t	*mpp;
		uint_t			sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).
		 */
		if ((rtype == R_AMD64_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			if (relacount) {
				relbgn = elf_reloc_relative_count(relbgn,
				    relacount, relsiz, basebgn, lmp,
				    textrel, 0);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, lmp, textrel, 0);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_AMD64_JUMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				relbgn = elf_reloc_relative_count(relbgn,
				    plthint, relsiz, basebgn, lmp, textrel, 1);
				plthint = 0;
				continue;
			}
			roffset += basebgn;
		}

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations: skip null relocations, and if no .plt
		 * processing is required (lazy binding of a fixed object),
		 * skip over the entire block of .plt relocations.
		 */
		if (rtype == R_AMD64_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.
		 */
		if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
		    (*(ulong_t *)roffset > _pltend)))
			continue;

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		binfo = 0;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * If a Syminfo section is provided, determine if this
			 * symbol is deferred, and if so, skip this relocation.
			 */
			if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
			    textrel, sip, rsymndx))
				continue;

			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address.
			 * (We should have no local relocations in the
			 * executable.)
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = NULL;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is the same as that of
				 * the previous relocation we processed, then
				 * reuse the previous values. (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol; our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_AMD64_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					Sresult		sr;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup and
					 * symbol result data structures.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);
					SRESULT_INIT(sr, name);
					symdef = NULL;

					if (lookup_sym(&sl, &sr, &binfo,
					    in_nfavl)) {
						name = (char *)sr.sr_name;
						_lmp = sr.sr_dmap;
						symdef = sr.sr_sym;
					}

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    if (sl.sl_bind != STB_WEAK) {
						if (elf_reloc_error(lmp, name,
						    rel, binfo))
							continue;

						ret = 0;
						break;

					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of the
					 * definition: symbol value plus base
					 * address of containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case they can be used for
					 * subsequent relocations.
					 */
					if (rtype != R_AMD64_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_AMD64_DTPMOD64) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;

			name = NULL;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, 0, name));

		/*
		 * Make sure the segment is writable.
		 */
		if (((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
			ret = 0;
			break;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_AMD64_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_AMD64_JUMP_SLOT:
			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
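				/*
				 * An audit library has asked for plt
				 * enter/exit tracing, so route this plt
				 * through the dynamic plt glue code (see
				 * elf_plt_trace_write() above).
				 */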
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			value += reladd;
			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
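 *
 * These correspond to the reserved got[1] and got[2] entries that the
 * amd64 ABI sets aside for the dynamic linker (got[0] holds the address
 * of the object's _DYNAMIC section).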
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	pltaddr;

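	/*
	 * On amd64 a full plt binding is simply the resolved symbol value,
	 * plus the relocation addend, stored into the corresponding got
	 * entry.
	 */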
	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}
963