xref: /titanic_52/usr/src/cmd/sgs/rtld/sparcv9/sparc_elf.c (revision fb3fb4f3d76d55b64440afd0af72775dfad3bd1d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  *	Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  *	Use is subject to license terms.
25  */
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SPARC V9 machine dependent and ELF file class dependent functions.
30  * Contains routines for performing function binding and symbol relocations.
31  */
32 #include	"_synonyms.h"
33 
34 #include	<stdio.h>
35 #include	<sys/elf.h>
36 #include	<sys/elf_SPARC.h>
37 #include	<sys/mman.h>
38 #include	<dlfcn.h>
39 #include	<synch.h>
40 #include	<string.h>
41 #include	"_rtld.h"
42 #include	"_audit.h"
43 #include	"_elf.h"
44 #include	"msg.h"
45 #include	"debug.h"
46 #include	"reloc.h"
47 #include	"conv.h"
48 
49 extern void	iflush_range(caddr_t, size_t);
50 extern void	plt_upper_32(uintptr_t, uintptr_t);
51 extern void	plt_upper_44(uintptr_t, uintptr_t);
52 extern void	plt_full_range(uintptr_t, uintptr_t);
53 extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);
54 extern void	elf_rtbndr_far(Rt_map *, ulong_t, caddr_t);
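
/*
 * The routines declared above are implemented in the machine-dependent
 * assembly support code: iflush_range() flushes the instruction cache over
 * the given address range, the plt_*() routines rewrite a PLT entry using
 * the named addressing form, and elf_rtbndr()/elf_rtbndr_far() are the lazy
 * binding entry points installed in .PLT1 and .PLT0 respectively.
 */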
55 
56 
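/*
 * Validate a SPARC V9 object's e_flags against the capability flags this
 * process was given (at_flags).  Returns 1 if the object is acceptable, or
 * 0 with the rejection reason recorded in the Rej_desc.
 */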
57 int
58 elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
59 {
60 	/*
61 	 * Check machine type and flags.
62 	 */
63 	if (ehdr->e_flags & EF_SPARC_EXT_MASK) {
64 		/*
65 		 * Check vendor-specific extensions.
66 		 */
67 		if (ehdr->e_flags & EF_SPARC_HAL_R1) {
68 			rej->rej_type = SGS_REJ_HAL;
69 			rej->rej_info = (uint_t)ehdr->e_flags;
70 			return (0);
71 		}
72 		if ((ehdr->e_flags & EF_SPARC_SUN_US3) & ~at_flags) {
73 			rej->rej_type = SGS_REJ_US3;
74 			rej->rej_info = (uint_t)ehdr->e_flags;
75 			return (0);
76 		}
77 
78 		/*
79 		 * Generic check.
80 		 * All of our 64-bit SPARC's support the US1 (UltraSPARC 1)
81 		 * All of our 64-bit SPARCs support the US1 (UltraSPARC 1)
82 		 */
83 		if ((ehdr->e_flags & EF_SPARC_EXT_MASK) & ~at_flags) {
84 			rej->rej_type = SGS_REJ_BADFLAG;
85 			rej->rej_info = (uint_t)ehdr->e_flags;
86 			return (0);
87 		}
88 	} else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
89 		rej->rej_type = SGS_REJ_BADFLAG;
90 		rej->rej_info = (uint_t)ehdr->e_flags;
91 		return (0);
92 	}
93 	return (1);
94 }
95 
96 
97 void
98 ldso_plt_init(Rt_map * lmp)
99 {
100 	/*
101 	 * There is no need to analyze ld.so because we don't map in any of
102 	 * its dependencies.  However, we may map these dependencies in later
103 	 * (as if ld.so had dlopened them), so initialize the plt and the
104 	 * permission information.
105 	 */
106 	if (PLTGOT(lmp)) {
107 		Xword pltoff;
108 
109 		/*
110 		 * Install the lm pointer in .PLT2 as per the ABI.
111 		 */
112 		pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
113 		elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);
114 
115 		/*
116 		 * The V9 ABI states that the first 32k PLT entries
117 		 * use .PLT1, with .PLT0 used by the "latter" entries.
118 		 * We don't currently implement the extendend format,
119 		 * We don't currently implement the extended format,
120 		 * trying to use it.
121 		 */
122 		elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);
123 
124 		/*
125 		 * Initialize .PLT1
126 		 */
127 		pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
128 		elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
129 	}
130 }
131 
132 /*
133  * elf_plt_write() will test to see how far away our destination
134  *	address lies.  If it is close enough that a branch can
135  *	be used instead of a jmpl - we will fill the plt in with
136  * 	a single branch.  The branches are much quicker than
137  *	a jmpl instruction - see bug#4356879 for further
138  *	details.
139  *
140  *	NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
141  *		librtld/dldump update PLTs whose physical
142  *		address is not the same as the 'virtual' runtime
143  *		address.
144  */
145 Pltbindtype
146 elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
147 	Xword pltndx)
148 {
149 	Rela		*rel = (Rela *)rptr;
150 	uintptr_t	nsym = ~symval;
151 	uintptr_t	vpltaddr, pltaddr;
152 	long		disp;
153 
154 
155 	pltaddr = addr + rel->r_offset;
156 	vpltaddr = vaddr + rel->r_offset;
157 	disp = symval - vpltaddr - 4;
158 
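	/*
	 * Entries past the near PLT range are "far" PLTs.  Binding one is
	 * just a 64-bit store: the target address, adjusted by the
	 * relocation addend and made relative to the object's base, is
	 * written into the entry's pointer slot, which the far PLT code
	 * sequence uses at call time to reach the destination.
	 */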
159 	if (pltndx >= (M64_PLT_NEARPLTS - M_PLT_XNumber)) {
160 		*((Sxword *)pltaddr) = (uintptr_t)symval +
161 			(uintptr_t)rel->r_addend - vaddr;
162 		DBG_CALL(pltcntfar++);
163 		return (PLT_T_FAR);
164 	}
165 
166 	/*
167 	 * Test if the destination address is close enough to use
168 	 * a ba,a... instruction to reach it.
169 	 */
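	/*
	 * For reference: a ba,a encodes a 22-bit word displacement (roughly
	 * +-8MB of byte offset, hence the S_INRANGE(disp, 23) test below),
	 * while ba,a,pt %icc encodes a 19-bit word displacement (roughly
	 * +-1MB, hence S_INRANGE(disp, 20)).
	 */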
170 	if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
171 		uint_t		*pltent, bainstr;
172 		Pltbindtype	rc;
173 
174 		pltent = (uint_t *)pltaddr;
175 		/*
176 		 * The
177 		 *
178 		 *	ba,a,pt %icc, <dest>
179 		 *
180 		 * is the most efficient of the PLT forms.  If we
181 		 * are within +-20 bits, use that branch.
182 		 */
183 		if (S_INRANGE(disp, 20)) {
184 			bainstr = M_BA_A_PT;	/* ba,a,pt %icc,<dest> */
185 			/* LINTED */
186 			bainstr |= (uint_t)(S_MASK(19) & (disp >> 2));
187 			rc = PLT_T_21D;
188 			DBG_CALL(pltcnt21d++);
189 		} else {
190 			/*
191 			 * Otherwise - we fall back to the good old
192 			 *
193 			 *	ba,a	<dest>
194 			 *
195 			 * Which still beats a jmpl instruction.
196 			 */
197 			bainstr = M_BA_A;		/* ba,a <dest> */
198 			/* LINTED */
199 			bainstr |= (uint_t)(S_MASK(22) & (disp >> 2));
200 			rc = PLT_T_24D;
201 			DBG_CALL(pltcnt24d++);
202 		}
203 
204 		pltent[2] = M_NOP;		/* nop instr */
205 		pltent[1] = bainstr;
206 
207 		iflush_range((char *)(&pltent[1]), 4);
208 		pltent[0] = M_NOP;		/* nop instr */
209 		iflush_range((char *)(&pltent[0]), 4);
210 		return (rc);
211 	}
212 
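	/*
	 * nsym is the bitwise complement of the target address.  If it fits
	 * in the low 32 (or 44) bits, the destination lies within the top
	 * 4GB (or top 2^44 bytes) of the address space (where 64-bit objects
	 * are typically mapped), and the assembly helpers can use a shorter
	 * absolute-address sequence than the full 64-bit form.
	 */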
213 	if ((nsym >> 32) == 0) {
214 		plt_upper_32(pltaddr, symval);
215 		DBG_CALL(pltcntu32++);
216 		return (PLT_T_U32);
217 	}
218 
219 	if ((nsym >> 44) == 0) {
220 		plt_upper_44(pltaddr, symval);
221 		DBG_CALL(pltcntu44++);
222 		return (PLT_T_U44);
223 	}
224 
225 	/*
226 	 * The PLT destination is not in reach of
227 	 * a branch instruction - so we fall back
228 	 * to a 'jmpl' sequence.
229 	 */
230 	plt_full_range(pltaddr, symval);
231 	DBG_CALL(pltcntfull++);
232 	return (PLT_T_FULL);
233 }
234 
235 /*
236  * Local storage space created on the stack for this glue
237  * code includes space for:
238  *		0x8	pointer to dyn_data
239  *		0x8	size prev stack frame
240  */
241 static const Byte dyn_plt_template[] = {
242 /* 0x0 */	0x2a, 0xcf, 0x80, 0x03,	/* brnz,a,pt %fp, 0xc	*/
243 /* 0x4 */	0x82, 0x27, 0x80, 0x0e,	/* sub %fp, %sp, %g1 */
244 /* 0x8 */	0x82, 0x10, 0x20, 0xb0,	/* mov 176, %g1	*/
245 /* 0xc */	0x9d, 0xe3, 0xbf, 0x40,	/* save %sp, -192, %sp	*/
246 /* 0x10 */	0xc2, 0x77, 0xa7, 0xef,	/* stx %g1, [%fp + 2031] */
247 /* 0x14 */	0x0b, 0x00, 0x00, 0x00,	/* sethi %hh(dyn_data), %g5 */
248 /* 0x18 */	0x8a, 0x11, 0x60, 0x00,	/* or %g5, %hm(dyn_data), %g5	*/
249 /* 0x1c */	0x8b, 0x29, 0x70, 0x20,	/* sllx %g5, 32, %g5	*/
250 /* 0x20 */	0x03, 0x00, 0x00, 0x00,	/* sethi %lm(dyn_data), %g1	*/
251 /* 0x24 */	0x82, 0x10, 0x60, 0x00,	/* or %g1, %lo(dyn_data), %g1	*/
252 /* 0x28 */	0x82, 0x10, 0x40, 0x05,	/* or %g1, %g5, %g1	*/
253 /* 0x2c */	0x40, 0x00, 0x00, 0x00,	/* call <rel_addr>	*/
254 /* 0x30 */	0xc2, 0x77, 0xa7, 0xf7,	/* stx %g1, [%fp + 2039] */
255 /* 0x34 */	0x01, 0x00, 0x00, 0x00	/* nop ! for 8-byte alignment */
256 };
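/*
 * The zeroed immediate fields in the template above (the sethi/or sequence
 * at offsets 0x14 - 0x24 and the call at 0x2c) are patched by
 * elf_plt_trace_write() below, using do_reloc() with the corresponding
 * HH22/HM10/LM22/LO10/WDISP30 relocations.
 */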
257 
258 
259 int	dyn_plt_ent_size = sizeof (dyn_plt_template) +
260 		sizeof (Addr) +		/* reflmp */
261 		sizeof (Addr) +		/* deflmp */
262 		sizeof (Word) +		/* symndx */
263 		sizeof (Word) +		/* sb_flags */
264 		sizeof (Sym);		/* symdef */
265 
266 
267 /*
268  * the dynamic plt entry is:
269  *
270  *	brnz,a,pt	%fp, 1f
271  *	 sub     	%sp, %fp, %g1
272  *	mov     	SA(MINFRAME), %g1
273  * 1:
274  *	save    	%sp, -(SA(MINFRAME) + (2 * CLONGSIZE)), %sp
275  *
276  *	! store prev stack size
277  *	stx     	%g1, [%fp + STACK_BIAS - (2 * CLONGSIZE)]
278  *
279  *	sethi   	%hh(dyn_data), %g5
280  *	or      	%g5, %hm(dyn_data), %g5
281  *	sllx    	%g5, 32, %g5
282  *	sethi   	%lm(dyn_data), %g1
283  *	or      	%g1, %lo(dyn_data), %g1
284  *	or      	%g1, %g5, %g1
285  *
286  *	! store dyn_data ptr and call
287  *	call    	elf_plt_trace
288  *	 stx     	%g1, [%fp + STACK_BIAS - CLONGSIZE]
289  *	nop
290  * dyn data:
291  *	Addr		reflmp
292  *	Addr		deflmp
293  *	Word		symndx
294  *	Word		sb_flags
295  *	Sym		symdef  (Elf64_Sym = 24-bytes)
296  */
297 static caddr_t
298 elf_plt_trace_write(caddr_t addr, Rela * rptr, Rt_map * rlmp, Rt_map * dlmp,
299     Sym * sym, uint_t symndx, ulong_t pltndx, caddr_t to, uint_t sb_flags,
300     int *fail)
301 {
302 	extern ulong_t	elf_plt_trace();
303 	Addr		dyn_plt;
304 	Addr *		dyndata;
305 
306 	/*
307 	 * If both pltenter & pltexit have been disabled, there
308 	 * is no reason to even create the glue code.
309 	 */
310 	if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
311 	    (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
312 		(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
313 		    rptr, (uintptr_t)to, pltndx);
314 		return (to);
315 	}
316 
317 	/*
318 	 * We only need to add the glue code if there is an auditing
319 	 * library that is interested in this binding.
320 	 */
321 	dyn_plt = (Xword)AUDINFO(rlmp)->ai_dynplts +
322 		(pltndx * dyn_plt_ent_size);
323 
324 	/*
325 	 * Have we initialized this dynamic plt entry yet?  If we haven't, do it
326 	 * now.  Otherwise this function has been called before, but from a
327 	 * different plt (i.e. from another shared object).  In that case
328 	 * we just set the plt to point to the new dyn_plt.
329 	 */
330 	if (*(Word *)dyn_plt == 0) {
331 		Sym *	symp;
332 		Xword	symvalue;
333 
334 		(void) memcpy((void *)dyn_plt, dyn_plt_template,
335 		    sizeof (dyn_plt_template));
336 		dyndata = (Addr *)(dyn_plt + sizeof (dyn_plt_template));
337 
338 		/*
339 		 * relocating:
340 		 *	sethi	%hh(dyndata), %g5
341 		 */
342 		symvalue = (Xword)dyndata;
343 		if (do_reloc(R_SPARC_HH22, (Byte *)(dyn_plt + 0x14),
344 		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
345 		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
346 			*fail = 1;
347 			return (0);
348 		}
349 
350 		/*
351 		 * relocating:
352 		 *	or	%g5, %hm(dyndata), %g5
353 		 */
354 		symvalue = (Xword)dyndata;
355 		if (do_reloc(R_SPARC_HM10, (Byte *)(dyn_plt + 0x18),
356 		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
357 		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
358 			*fail = 1;
359 			return (0);
360 		}
361 
362 		/*
363 		 * relocating:
364 		 *	sethi	%lm(dyndata), %g1
365 		 */
366 		symvalue = (Xword)dyndata;
367 		if (do_reloc(R_SPARC_LM22, (Byte *)(dyn_plt + 0x20),
368 		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
369 		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
370 			*fail = 1;
371 			return (0);
372 		}
373 
374 		/*
375 		 * relocating:
376 		 *	or	%g1, %lo(dyndata), %g1
377 		 */
378 		symvalue = (Xword)dyndata;
379 		if (do_reloc(R_SPARC_LO10, (Byte *)(dyn_plt + 0x24),
380 		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
381 		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
382 			*fail = 1;
383 			return (0);
384 		}
385 
386 		/*
387 		 * relocating:
388 		 *	call	elf_plt_trace
389 		 */
390 		symvalue = (Xword)((Addr)&elf_plt_trace -
391 			(Addr)(dyn_plt + 0x2c));
392 		if (do_reloc(R_SPARC_WDISP30, (Byte *)(dyn_plt + 0x2c),
393 		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
394 		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
395 			*fail = 1;
396 			return (0);
397 		}
398 
399 		*dyndata++ = (Addr)rlmp;
400 		*dyndata++ = (Addr)dlmp;
401 
402 		/*
403 		 * symndx in the high word, sb_flags in the low.
404 		 */
405 		*dyndata = (Addr)sb_flags;
406 		*(Word *)dyndata = symndx;
407 		dyndata++;
408 
409 		symp = (Sym *)dyndata;
410 		*symp = *sym;
411 		symp->st_value = (Addr)to;
412 		iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
413 	}
414 
415 	(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
416 		rptr, (uintptr_t)dyn_plt, pltndx);
417 	return ((caddr_t)dyn_plt);
418 }
419 
420 
421 /*
422  * Function binding routine - invoked on the first call to a function through
423  * the procedure linkage table;
424  * passes first through an assembly language interface.
425  *
426  * Takes the address of the PLT entry where the call originated,
427  * the offset into the relocation table of the associated
428  * relocation entry and the address of the link map (rt_private_map struct)
429  * for the entry.
430  *
431  * Returns the address of the function referenced after re-writing the PLT
432  * entry to invoke the function directly.
433  *
434  * On error, causes process to terminate with a signal.
435  */
436 
437 ulong_t
438 elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
439 {
440 	Rt_map		*nlmp, * llmp;
441 	Addr		addr, vaddr, reloff, symval;
442 	char		*name;
443 	Rela		*rptr;
444 	Sym		*sym, *nsym;
445 	Xword		pltndx;
446 	uint_t		binfo, sb_flags = 0;
447 	ulong_t		rsymndx;
448 	Slookup		sl;
449 	Pltbindtype	pbtype;
450 	int		entry, dbg_save, lmflags, farplt = 0;
451 
452 	/*
453 	 * For compatibility with libthread (TI_VERSION 1) we track the entry
454 	 * value.  A zero value indicates we have recursed into ld.so.1 to
455 	 * further process a locking request.  Under this recursion we disable
456 	 * tsort and cleanup activities.
457 	 */
458 	entry = enter();
459 
460 	if ((lmflags = LIST(lmp)->lm_flags) & LML_FLG_RTLDLM) {
461 		dbg_save = dbg_mask;
462 		dbg_mask = 0;
463 	}
464 
465 	/*
466 	 * Must calculate true plt relocation address from reloc.
467 	 * Take the offset, subtract the size of the reserved PLT entries, and
468 	 * divide by the PLT entry size, which should give the index of the plt
469 	 * entry (and relocation entry since they have been defined to be
470 	 * in the same order).  Then we must multiply by the size of
471 	 * a relocation entry, which will give us the offset of the
472 	 * plt relocation entry from the start of them given by JMPREL(lm).
473 	 */
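	/*
	 * Illustrative arithmetic only (the actual constants come from the
	 * machdep headers): if each near PLT entry were 32 bytes and there
	 * were 4 reserved entries, a pltoff of 0x100 would give addr 0x80,
	 * hence pltndx 4, and the matching relocation would live at
	 * JMPREL(lmp) + 4 * sizeof (Rela).
	 */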
474 	addr = pltoff - M_PLT_RESERVSZ;
475 
476 	if (pltoff < (M64_PLT_NEARPLTS * M_PLT_ENTSIZE)) {
477 		pltndx = addr / M_PLT_ENTSIZE;
478 	} else {
479 		ulong_t	pltblockoff;
480 
481 		pltblockoff = pltoff - (M64_PLT_NEARPLTS * M_PLT_ENTSIZE);
482 		pltndx = M64_PLT_NEARPLTS +
483 			((pltblockoff / M64_PLT_FBLOCKSZ) * M64_PLT_FBLKCNTS) +
484 			((pltblockoff % M64_PLT_FBLOCKSZ) / M64_PLT_FENTSIZE) -
485 			M_PLT_XNumber;
486 		farplt = 1;
487 	}
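	/*
	 * The far region is laid out in blocks of M64_PLT_FBLOCKSZ bytes,
	 * each holding M64_PLT_FBLKCNTS code entries of M64_PLT_FENTSIZE
	 * bytes; the arithmetic above recovers the overall PLT index (less
	 * the reserved entries) from the offset into the far region.
	 */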
488 
489 	/*
490 	 * Perform some basic sanity checks.  If we didn't get a load map
491 	 * or the plt offset is invalid then it's possible someone has walked
492 	 * over the plt entries or jumped to plt[01] out of the blue.
493 	 */
494 	if (!lmp || (!farplt && (addr % M_PLT_ENTSIZE) != 0) ||
495 	    (farplt && (addr % M_PLT_INSSIZE))) {
496 		eprintf(ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
497 		    conv_reloc_SPARC_type_str(R_SPARC_JMP_SLOT),
498 		    EC_ADDR(lmp), EC_XWORD(pltoff), EC_ADDR(from));
499 		rtldexit(LIST(lmp), 1);
500 	}
501 	reloff = pltndx * sizeof (Rela);
502 
503 	/*
504 	 * Use relocation entry to get symbol table entry and symbol name.
505 	 */
506 	addr = (ulong_t)JMPREL(lmp);
507 	rptr = (Rela *)(addr + reloff);
508 	rsymndx = ELF_R_SYM(rptr->r_info);
509 	sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
510 	name = (char *)(STRTAB(lmp) + sym->st_name);
511 
512 	/*
513 	 * Determine the last link-map of this list; this will be the starting
514 	 * point for any tsort() processing.
515 	 */
516 	llmp = LIST(lmp)->lm_tail;
517 
518 	/*
519 	 * Find definition for symbol.
520 	 */
521 	sl.sl_name = name;
522 	sl.sl_cmap = lmp;
523 	sl.sl_imap = LIST(lmp)->lm_head;
524 	sl.sl_hash = 0;
525 	sl.sl_rsymndx = rsymndx;
526 	sl.sl_flags = LKUP_DEFT;
527 	if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
528 		eprintf(ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
529 		    demangle(name));
530 		rtldexit(LIST(lmp), 1);
531 	}
532 
533 	symval = nsym->st_value;
534 	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
535 	    (nsym->st_shndx != SHN_ABS))
536 		symval += ADDR(nlmp);
537 	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
538 		/*
539 		 * Record that this new link map is now bound to the caller.
540 		 */
541 		if (bind_one(lmp, nlmp, BND_REFER) == 0)
542 			rtldexit(LIST(lmp), 1);
543 	}
544 
545 	if ((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
546 		/* LINTED */
547 		uint_t	symndx = (uint_t)(((uintptr_t)nsym -
548 			(uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
549 
550 		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
551 			&sb_flags);
552 	}
553 
554 	if (FLAGS(lmp) & FLG_RT_FIXED)
555 		vaddr = 0;
556 	else
557 		vaddr = ADDR(lmp);
558 
559 	pbtype = PLT_T_NONE;
560 	if (!(rtld_flags & RT_FL_NOBIND)) {
561 		if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
562 		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
563 		    AUDINFO(lmp)->ai_dynplts) {
564 			int	fail = 0;
565 			/* LINTED */
566 			uint_t	symndx = (uint_t)(((uintptr_t)nsym -
567 				(uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
568 
569 			symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
570 			    rptr, lmp, nlmp, nsym, symndx, pltndx,
571 			    (caddr_t)symval, sb_flags, &fail);
572 			if (fail)
573 				rtldexit(LIST(lmp), 1);
574 		} else {
575 			/*
576 			 * Write standard PLT entry to jump directly
577 			 * to newly bound function.
578 			 */
579 			pbtype = elf_plt_write((uintptr_t)vaddr,
580 				(uintptr_t)vaddr, rptr, symval, pltndx);
581 		}
582 	}
583 
584 	/*
585 	 * Print binding information and rebuild PLT entry.
586 	 */
587 	DBG_CALL(Dbg_bind_global(NAME(lmp), from, from - ADDR(lmp),
588 	    (Xword)pltndx, pbtype, NAME(nlmp), (caddr_t)symval,
589 	    (caddr_t)nsym->st_value, name, binfo));
590 
591 
592 	/*
593 	 * Complete any processing for newly loaded objects.  Note we don't
594 	 * know exactly where any new objects are loaded (we know the object
595 	 * that supplied the symbol, but others may have been loaded lazily as
596 	 * we searched for the symbol), so sorting starts from the last
597 	 * link-map known on entry to this routine.
598 	 */
599 	if (entry)
600 		load_completion(llmp, lmp);
601 
602 	/*
603 	 * Some operations like dldump() or dlopen()'ing a relocatable object
604 	 * result in objects being loaded on rtld's link-map; make sure these
605 	 * objects are initialized also.
606 	 */
607 	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
608 		load_completion(nlmp, 0);
609 
610 	/*
611 	 * If the object we've bound to is in the process of being initialized
612 	 * by another thread, determine whether we should block.
613 	 */
614 	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);
615 
616 	/*
617 	 * Make sure the object to which we've bound has had its .init fired.
618 	 * Cleanup before return to user code.
619 	 */
620 	if (entry) {
621 		is_dep_init(nlmp, lmp);
622 		leave(LIST(lmp));
623 	}
624 
625 	if (lmflags & LML_FLG_RTLDLM)
626 		dbg_mask = dbg_save;
627 
628 	return (symval);
629 }
630 
631 
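/*
 * Allocate (or reuse) an entry from the object's PLT padding area so that
 * an out-of-range call can be redirected through a nearby stub.  Entries
 * are kept on a list sorted by destination address so that repeated
 * bindings to the same destination share one pad entry; if the padding
 * area is exhausted we simply fall back to the original relocation value.
 */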
632 static int
633 bindpltpad(Rt_map *lmp, List *padlist, Addr value, void **pltaddr,
634     const char *fname, const char *sname)
635 {
636 	Listnode	*lnp, *prevlnp;
637 	Pltpadinfo	*pip;
638 	void		*plt;
639 	uintptr_t	pltoff;
640 	Rela		rel;
641 	int		i;
642 
643 	prevlnp = 0;
644 	for (LIST_TRAVERSE(padlist, lnp, pip)) {
645 		if (pip->pp_addr == value) {
646 			*pltaddr = pip->pp_plt;
647 			DBG_CALL(Dbg_pltpad_bindto64(NAME(lmp), sname,
648 				(Addr)*pltaddr));
649 			return (1);
650 		}
651 		if (pip->pp_addr > value)
652 			break;
653 		prevlnp = lnp;
654 	}
655 
656 	plt = PLTPAD(lmp);
657 	pltoff = (uintptr_t)plt - (uintptr_t)ADDR(lmp);
658 
659 	PLTPAD(lmp) = (void *)((uintptr_t)PLTPAD(lmp) + M_PLT_ENTSIZE);
660 
661 	if (PLTPAD(lmp) > PLTPADEND(lmp)) {
662 		/*
663 		 * Just fail in the usual relocation way
664 		 */
665 		*pltaddr = (void *)value;
666 		return (1);
667 	}
668 	rel.r_offset = pltoff;
669 	rel.r_info = 0;
670 	rel.r_addend = 0;
671 
672 
673 	/*
674 	 * elf_plt_write assumes the plt was previously filled
675 	 * with NOPs, so fill it in now.
676 	 */
677 	for (i = 0; i < (M_PLT_ENTSIZE / sizeof (uint_t)); i++) {
678 		((uint_t *)plt)[i] = M_NOP;
679 	}
680 	iflush_range((caddr_t)plt, M_PLT_ENTSIZE);
681 
682 	(void) elf_plt_write(ADDR(lmp), ADDR(lmp), &rel, value, 0);
683 
684 	if ((pip = calloc(sizeof (Pltpadinfo), 1)) == 0)
685 		return (0);
686 	pip->pp_addr = value;
687 	pip->pp_plt = plt;
688 
689 	if (prevlnp)
690 		lnp = list_insert(padlist, pip, prevlnp);
691 	else
692 		lnp = list_prepend(padlist, pip);
693 
694 	if (!lnp) {
695 		free(pip);
696 		return (0);
697 	}
698 
699 	*pltaddr = plt;
700 	DBG_CALL(Dbg_pltpad_boundto64(NAME(lmp), (Addr)*pltaddr, fname, sname));
701 	return (1);
702 }
703 
704 /*
705  * Read and process the relocations for one link object; we assume all
706  * relocation sections for loadable segments are stored contiguously in
707  * the file.
708  */
709 int
710 elf_reloc(Rt_map *lmp, uint_t plt)
711 {
712 	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
713 	ulong_t		roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
714 	ulong_t		emap, pltndx;
715 	uint_t		dsymndx, binfo, pbinfo;
716 	Byte		rtype;
717 	long		reladd;
718 	Addr		value, pvalue;
719 	Sym		*symref, *psymref, *symdef, *psymdef;
720 	char		*name, *pname;
721 	Rt_map		*_lmp, *plmp;
722 	int		textrel = 0, ret = 1, noplt = 0;
723 	long		relacount = RELACOUNT(lmp);
724 	Rela		*rel;
725 	Pltbindtype	pbtype;
726 	List		pltpadlist = {0, 0};
727 	Alist		*bound = 0;
728 
729 	/*
730 	 * If an object has any DT_REGISTER entries associated with
731 	 * it, they are processed now.
732 	 */
733 	if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
734 		if (elf_regsyms(lmp) == 0)
735 			return (0);
736 	}
737 
738 	/*
739 	 * Although only necessary for lazy binding, initialize the first
740 	 * procedure linkage table entry to go to elf_rtbndr().  dbx(1) seems
741 	 * to find this useful.
742 	 */
743 	if ((plt == 0) && PLTGOT(lmp)) {
744 		Xword pltoff;
745 
746 		if ((ulong_t)PLTGOT(lmp) < etext) {
747 			if (elf_set_prot(lmp, PROT_WRITE) == 0)
748 				return (0);
749 			textrel = 1;
750 		}
751 
752 		/*
753 		 * Install the lm pointer in .PLT2 as per the ABI.
754 		 */
755 		pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
756 		elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);
757 
758 		/*
759 		 * The V9 ABI states that the first 32k PLT entries
760 		 * use .PLT1, with .PLT0 used by the "latter" entries.
761 		 * We don't currently implement the extended format,
762 		 * so install an error handler in .PLT0 to catch anyone
763 		 * trying to use it.
764 		 */
765 		elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);
766 
767 		/*
768 		 * Initialize .PLT1
769 		 */
770 		pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
771 		elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
772 	}
773 
774 	/*
775 	 * Initialize the plt start and end addresses.
776 	 */
777 	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
778 		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));
779 
780 	/*
781 	 * If we've been called upon to promote an RTLD_LAZY object to an
782 	 * RTLD_NOW, then we're only interested in scanning the .plt table.
783 	 */
784 	if (plt) {
785 		relbgn = pltbgn;
786 		relend = pltend;
787 	} else {
788 		/*
789 		 * The relocation sections appear to the run-time linker as a
790 		 * single table.  Determine the address of the beginning and end
791 		 * of this table.  There are two different interpretations of
792 		 * the ABI at this point:
793 		 *
794 		 *   o	The REL table and its associated RELSZ indicate the
795 		 *	concatenation of *all* relocation sections (this is the
796 		 *	model our link-editor constructs).
797 		 *
798 		 *   o	The REL table and its associated RELSZ indicate the
799 		 *	concatenation of all *but* the .plt relocations.  These
800 		 *	relocations are specified individually by the JMPREL and
801 		 *	PLTRELSZ entries.
802 		 *
803 		 * Determine from our knowledge of the relocation range and
804 		 * .plt range, the range of the total relocation table.  Note
805 		 * that one other ABI assumption seems to be that the .plt
806 		 * relocations always follow any other relocations; the
807 		 * following range checking drops that assumption.
808 		 */
809 		relbgn = (ulong_t)(REL(lmp));
810 		relend = relbgn + (ulong_t)(RELSZ(lmp));
811 		if (pltbgn) {
812 			if (!relbgn || (relbgn > pltbgn))
813 				relbgn = pltbgn;
814 			if (!relbgn || (relend < pltend))
815 				relend = pltend;
816 		}
817 	}
818 	if (!relbgn || (relbgn == relend)) {
819 		DBG_CALL(Dbg_reloc_run(NAME(lmp), 0, plt, DBG_REL_NONE));
820 		return (1);
821 	}
822 
823 	relsiz = (ulong_t)(RELENT(lmp));
824 	basebgn = ADDR(lmp);
825 	emap = ADDR(lmp) + MSIZE(lmp);
826 
827 	DBG_CALL(Dbg_reloc_run(NAME(lmp), M_REL_SHT_TYPE, plt, DBG_REL_START));
828 
829 	/*
830 	 * If we're processing in lazy mode there is no need to scan the
831 	 * .rela.plt table.
832 	 */
833 	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
834 		noplt = 1;
835 
836 	/*
837 	 * Loop through relocations.
838 	 */
839 	while (relbgn < relend) {
840 		Addr		vaddr;
841 		uint_t		sb_flags = 0;
842 
843 		rtype = (Byte)ELF_R_TYPE(((Rela *)relbgn)->r_info);
844 
845 		/*
846 		 * If this is a RELATIVE relocation in a shared object
847 		 * (the common case), and if we are not debugging, then
848 		 * jump into a tighter relocation loop (elf_reloc_relacount).
849 		 * Only make the jump if we've been given a hint on the
850 		 * number of relocations.
851 		 */
852 		if ((rtype == R_SPARC_RELATIVE) &&
853 		    !(FLAGS(lmp) & FLG_RT_FIXED) && !dbg_mask) {
854 			/*
855 			 * It's possible that the relative relocation block
856 			 * has relocations against the text segment as well
857 			 * as the data segment.  Since our optimized relocation
858 			 * engine does not check which segment the relocation
859 			 * is against, just mprotect it now if it has been
860 			 * marked as containing TEXTRELs.
861 			 */
862 			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
863 				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
864 					ret = 0;
865 					break;
866 				}
867 				textrel = 1;
868 			}
869 			if (relacount) {
870 				relbgn = elf_reloc_relacount(relbgn, relacount,
871 				    relsiz, basebgn);
872 				relacount = 0;
873 			} else {
874 				relbgn = elf_reloc_relative(relbgn, relend,
875 				    relsiz, basebgn, etext, emap);
876 			}
877 			if (relbgn >= relend)
878 				break;
879 			rtype = (Byte)ELF_R_TYPE(((Rela *)relbgn)->r_info);
880 		}
881 
882 		roffset = ((Rela *)relbgn)->r_offset;
883 
884 		reladd = (long)(((Rela *)relbgn)->r_addend);
885 		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
886 
887 		rel = (Rela *)relbgn;
888 		relbgn += relsiz;
889 
890 		/*
891 		 * Optimizations.
892 		 */
893 		if (rtype == R_SPARC_NONE)
894 			continue;
895 		if (noplt && ((ulong_t)rel >= pltbgn) &&
896 		    ((ulong_t)rel < pltend)) {
897 			relbgn = pltend;
898 			continue;
899 		}
900 
901 		if (rtype != R_SPARC_REGISTER) {
902 			/*
903 			 * If this is a shared object, add the base address
904 			 * to offset.
905 			 */
906 			if (!(FLAGS(lmp) & FLG_RT_FIXED))
907 				roffset += basebgn;
908 
909 			/*
910 			 * If this relocation is not against part of the image
911 			 * mapped into memory we skip it.
912 			 */
913 			if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
914 			    MSIZE(lmp)))) {
915 				elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
916 				    rsymndx);
917 				continue;
918 			}
919 		}
920 
921 		/*
922 		 * If we're promoting plts, determine if this one has already
923 		 * been written. An uninitialized plt's second instruction is a
924 		 * branch.
925 		 */
926 		if (plt) {
927 			uchar_t	*_roffset = (uchar_t *)roffset;
928 
929 			_roffset += M_PLT_INSSIZE;
930 			/* LINTED */
931 			if ((*(uint_t *)_roffset &
932 			    (~(S_MASK(19)))) != M_BA_A_XCC)
933 				continue;
934 		}
935 
936 		binfo = 0;
937 		pltndx = (ulong_t)-1;
938 		pbtype = PLT_T_NONE;
939 		/*
940 		 * If a symbol index is specified then get the symbol table
941 		 * entry, locate the symbol definition, and determine its
942 		 * address.
943 		 */
944 		if (rsymndx) {
945 			/*
946 			 * Get the local symbol table entry.
947 			 */
948 			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
949 			    (rsymndx * SYMENT(lmp)));
950 
951 			/*
952 			 * If this is a local symbol, just use the base address.
953 			 * (we should have no local relocations in the
954 			 * executable).
955 			 */
956 			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
957 				value = basebgn;
958 				name = (char *)0;
959 
960 				/*
961 				 * TLS relocation - value for DTPMOD relocation
962 				 * is the TLS modid.
963 				 */
964 				if (rtype == M_R_DTPMOD)
965 					value = TLSMODID(lmp);
966 			} else {
967 				/*
968 				 * If the symbol index is equal to that of the
969 				 * previous relocation we processed, then
970 				 * reuse the previous values. (Note that there
971 				 * have been cases where a relocation exists
972 				 * against a copy relocation symbol, our ld(1)
973 				 * should optimize this away, but make sure we
974 				 * don't use the same symbol information should
975 				 * this case exist).
976 				 */
977 				if ((rsymndx == psymndx) &&
978 				    (rtype != R_SPARC_COPY)) {
979 					/* LINTED */
980 					if (psymdef == 0) {
981 						DBG_CALL(Dbg_bind_weak(
982 						    NAME(lmp), (caddr_t)roffset,
983 						    (caddr_t)
984 						    (roffset - basebgn), name));
985 						continue;
986 					}
987 					/* LINTED */
988 					value = pvalue;
989 					/* LINTED */
990 					name = pname;
991 					symdef = psymdef;
992 					/* LINTED */
993 					symref = psymref;
994 					/* LINTED */
995 					_lmp = plmp;
996 					/* LINTED */
997 					binfo = pbinfo;
998 
999 					if ((LIST(_lmp)->lm_tflags |
1000 					    FLAGS1(_lmp)) &
1001 					    LML_TFLG_AUD_SYMBIND) {
1002 						value = audit_symbind(lmp, _lmp,
1003 						    /* LINTED */
1004 						    symdef, dsymndx, value,
1005 						    &sb_flags);
1006 					}
1007 				} else {
1008 					Slookup		sl;
1009 					uchar_t		bind;
1010 
1011 					/*
1012 					 * Look up the symbol definition.
1013 					 */
1014 					name = (char *)(STRTAB(lmp) +
1015 					    symref->st_name);
1016 
1017 					sl.sl_name = name;
1018 					sl.sl_cmap = lmp;
1019 					sl.sl_imap = 0;
1020 					sl.sl_hash = 0;
1021 					sl.sl_rsymndx = rsymndx;
1022 
1023 					if (rtype == R_SPARC_COPY)
1024 						sl.sl_flags = LKUP_COPY;
1025 					else
1026 						sl.sl_flags = LKUP_DEFT;
1027 
1028 					sl.sl_flags |= LKUP_ALLCNTLIST;
1029 
1030 					if (rtype != R_SPARC_JMP_SLOT)
1031 						sl.sl_flags |= LKUP_SPEC;
1032 
1033 					bind = ELF_ST_BIND(symref->st_info);
1034 					if (bind == STB_WEAK)
1035 						sl.sl_flags |= LKUP_WEAK;
1036 
1037 					symdef = lookup_sym(&sl, &_lmp, &binfo);
1038 
1039 					/*
1040 					 * If the symbol is not found and the
1041 					 * reference was not to a weak symbol,
1042 					 * report an error.  Weak references
1043 					 * may be unresolved.
1044 					 * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND)
1045 					 */
1046 					if (symdef == 0) {
1047 					    if (bind != STB_WEAK) {
1048 						if (LIST(lmp)->lm_flags &
1049 						    LML_FLG_IGNRELERR) {
1050 						    continue;
1051 						} else if (LIST(lmp)->lm_flags &
1052 						    LML_FLG_TRC_WARN) {
1053 						    (void) printf(MSG_INTL(
1054 							MSG_LDD_SYM_NFOUND),
1055 							demangle(name),
1056 							NAME(lmp));
1057 						    continue;
1058 						} else {
1059 						    eprintf(ERR_FATAL,
1060 							MSG_INTL(MSG_REL_NOSYM),
1061 							NAME(lmp),
1062 							demangle(name));
1063 						    ret = 0;
1064 						    break;
1065 						}
1066 					    } else {
1067 						psymndx = rsymndx;
1068 						psymdef = 0;
1069 
1070 						DBG_CALL(Dbg_bind_weak(
1071 						    NAME(lmp), (caddr_t)roffset,
1072 						    (caddr_t)
1073 						    (roffset - basebgn), name));
1074 						continue;
1075 					    }
1076 					}
1077 
1078 					/*
1079 					 * If symbol was found in an object
1080 					 * other than the referencing object
1081 					 * then record the binding.
1082 					 */
1083 					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
1084 					    FL1_RT_NOINIFIN) == 0)) {
1085 						if (alist_test(&bound, _lmp,
1086 						    sizeof (Rt_map *),
1087 						    AL_CNT_RELBIND) == 0) {
1088 							ret = 0;
1089 							break;
1090 						}
1091 					}
1092 
1093 					/*
1094 					 * Calculate the location of definition;
1095 					 * symbol value plus base address of
1096 					 * containing shared object.
1097 					 */
1098 					value = symdef->st_value;
1099 					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
1100 					    (symdef->st_shndx != SHN_ABS) &&
1101 					    (ELF_ST_TYPE(symdef->st_info) !=
1102 					    STT_TLS))
1103 						value += ADDR(_lmp);
1104 
1105 					/*
1106 					 * Retain this symbol index and the
1107 					 * value in case it can be used for the
1108 					 * subsequent relocations.
1109 					 */
1110 					if (rtype != R_SPARC_COPY) {
1111 						psymndx = rsymndx;
1112 						pvalue = value;
1113 						pname = name;
1114 						psymdef = symdef;
1115 						psymref = symref;
1116 						plmp = _lmp;
1117 						pbinfo = binfo;
1118 					}
1119 					if ((LIST(_lmp)->lm_tflags |
1120 					    FLAGS1(_lmp)) &
1121 					    LML_TFLG_AUD_SYMBIND) {
1122 						/* LINTED */
1123 						dsymndx = (((uintptr_t)symdef -
1124 						    (uintptr_t)SYMTAB(_lmp)) /
1125 						    SYMENT(_lmp));
1126 						value = audit_symbind(lmp, _lmp,
1127 						    symdef, dsymndx, value,
1128 						    &sb_flags);
1129 					}
1130 				}
1131 
1132 				/*
1133 				 * If relocation is PC-relative, subtract
1134 				 * offset address.
1135 				 */
1136 				if (IS_PC_RELATIVE(rtype))
1137 					value -= roffset;
1138 
1139 				/*
1140 				 * TLS relocation - value for DTPMOD relocation
1141 				 * is the TLS modid.
1142 				 */
1143 				if (rtype == M_R_DTPMOD)
1144 					value = TLSMODID(_lmp);
1145 				else if (rtype == M_R_TPOFF)
1146 					value = -(TLSSTATOFF(_lmp) - value);
1147 			}
1148 		} else {
1149 			/*
1150 			 * Special case: a register symbol associated with
1151 			 * symbol index 0 is initialized (i.e. relocated) to
1152 			 * a constant in the r_addend field rather than to a
1153 			 * symbol value.
1154 			 *
1155 			 * A DTPMOD relocation is a local binding to a TLS
1156 			 * symbol.  Fill in the TLSMODID for the current object.
1157 			 */
1158 			if (rtype == R_SPARC_REGISTER)
1159 				value = 0;
1160 			else if (rtype == M_R_DTPMOD)
1161 				value = TLSMODID(lmp);
1162 			else
1163 				value = basebgn;
1164 			name = (char *)0;
1165 		}
1166 
1167 		/*
1168 		 * If this object has relocations in the text segment, turn
1169 		 * off the write protect.
1170 		 */
1171 		if ((rtype != R_SPARC_REGISTER) && (roffset < etext) &&
1172 		    (textrel == 0)) {
1173 			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
1174 				ret = 0;
1175 				break;
1176 			}
1177 			textrel = 1;
1178 		}
1179 
1180 		/*
1181 		 * Call relocation routine to perform required relocation.
1182 		 */
1183 		DBG_CALL(Dbg_reloc_in(M_MACH, M_REL_SHT_TYPE, rel, name, NULL));
1184 
1185 		switch (rtype) {
1186 		case R_SPARC_REGISTER:
1187 			/*
1188 			 * The v9 ABI 4.2.4 says that system objects may,
1189 			 * but are not required to, use register symbols
1190 			 * to indicate how they use global registers. Thus
1191 			 * at least %g6, %g7 must be allowed in addition
1192 			 * to %g2 and %g3.
1193 			 */
1194 			value += reladd;
1195 			if (roffset == STO_SPARC_REGISTER_G1) {
1196 				set_sparc_g1(value);
1197 			} else if (roffset == STO_SPARC_REGISTER_G2) {
1198 				set_sparc_g2(value);
1199 			} else if (roffset == STO_SPARC_REGISTER_G3) {
1200 				set_sparc_g3(value);
1201 			} else if (roffset == STO_SPARC_REGISTER_G4) {
1202 				set_sparc_g4(value);
1203 			} else if (roffset == STO_SPARC_REGISTER_G5) {
1204 				set_sparc_g5(value);
1205 			} else if (roffset == STO_SPARC_REGISTER_G6) {
1206 				set_sparc_g6(value);
1207 			} else if (roffset == STO_SPARC_REGISTER_G7) {
1208 				set_sparc_g7(value);
1209 			} else {
1210 				eprintf(ERR_FATAL, MSG_INTL(MSG_REL_BADREG),
1211 				    NAME(lmp), EC_ADDR(roffset));
1212 				ret = 0;
1213 				break;
1214 			}
1215 
1216 			DBG_CALL(Dbg_reloc_reg_apply((Xword)roffset,
1217 				(Xword)value));
1218 			break;
1219 		case R_SPARC_COPY:
1220 			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
1221 			    symdef, _lmp, (const void *)value) == 0)
1222 				ret = 0;
1223 			break;
1224 		case R_SPARC_JMP_SLOT:
1225 			pltndx = ((uintptr_t)rel -
1226 				(uintptr_t)JMPREL(lmp)) / relsiz;
1227 
1228 			if (FLAGS(lmp) & FLG_RT_FIXED)
1229 				vaddr = 0;
1230 			else
1231 				vaddr = ADDR(lmp);
1232 
1233 			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
1234 			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
1235 			    AUDINFO(lmp)->ai_dynplts) {
1236 				int	fail = 0;
1237 				/* LINTED */
1238 				uint_t	symndx = (uint_t)(((uintptr_t)symdef -
1239 					(uintptr_t)SYMTAB(_lmp)) /
1240 					SYMENT(_lmp));
1241 
1242 				(void) elf_plt_trace_write((caddr_t)vaddr,
1243 				    (Rela *)rel, lmp, _lmp, symdef, symndx,
1244 				    pltndx, (caddr_t)value, sb_flags, &fail);
1245 				if (fail)
1246 					ret = 0;
1247 			} else {
1248 				/*
1249 				 * Write standard PLT entry to jump directly
1250 				 * to newly bound function.
1251 				 */
1252 				DBG_CALL(Dbg_reloc_apply(roffset,
1253 				    (ulong_t)value));
1254 				pbtype = elf_plt_write((uintptr_t)vaddr,
1255 				    (uintptr_t)vaddr, (void *)rel, value,
1256 				    pltndx);
1257 			}
1258 			break;
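		/*
		 * A call relocation whose PC-relative value will not fit in
		 * the instruction's displacement field is redirected through
		 * a PLT padding entry: bindpltpad() hands back a reachable
		 * stub bound to the real destination, and value is rebiased
		 * so the patched call lands on that stub instead.
		 */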
1259 		case R_SPARC_WDISP30:
1260 			if (PLTPAD(lmp) &&
1261 			    (S_INRANGE((Sxword)value, 29) == 0)) {
1262 				void *	plt = 0;
1263 
1264 				if (bindpltpad(lmp, &pltpadlist,
1265 				    value + roffset, &plt,
1266 				    NAME(_lmp), name) == 0) {
1267 					ret = 0;
1268 					break;
1269 				}
1270 				value = (Addr)((Addr)plt - roffset);
1271 			}
1272 			/* FALLTHROUGH */
1273 		default:
1274 			value += reladd;
1275 			if (IS_EXTOFFSET(rtype))
1276 				value += (Word)ELF_R_TYPE_DATA(rel->r_info);
1277 
1278 			/*
1279 			 * Write the relocation out.  If this relocation is a
1280 			 * common basic write, skip the do_reloc() engine.
1281 			 */
1282 			if ((rtype == R_SPARC_GLOB_DAT) ||
1283 			    (rtype == R_SPARC_64)) {
1284 				if (roffset & 0x7) {
1285 					eprintf(ERR_FATAL,
1286 					    MSG_INTL(MSG_REL_NONALIGN),
1287 					    conv_reloc_SPARC_type_str(rtype),
1288 					    NAME(lmp), demangle(name),
1289 					    EC_OFF(roffset));
1290 					ret = 0;
1291 				} else
1292 					*(ulong_t *)roffset += value;
1293 			} else {
1294 				if (do_reloc(rtype, (uchar_t *)roffset,
1295 				    (Xword *)&value, name, NAME(lmp)) == 0)
1296 					ret = 0;
1297 			}
1298 
1299 			/*
1300 			 * The value now contains the 'bit-shifted' value that
1301 			 * was or'ed into memory (this was set by do_reloc()).
1302 			 */
1303 			DBG_CALL(Dbg_reloc_apply((ulong_t)roffset,
1304 			    value));
1305 
1306 			/*
1307 			 * If this relocation is against a text segment, make
1308 			 * sure that the instruction cache is flushed.
1309 			 */
1310 			if (textrel)
1311 				iflush_range((caddr_t)roffset, 0x4);
1312 		}
1313 
1314 		if ((ret == 0) &&
1315 		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
1316 			break;
1317 
1318 		if (binfo) {
1319 			DBG_CALL(Dbg_bind_global(NAME(lmp), (caddr_t)roffset,
1320 			    (caddr_t)(roffset - basebgn), pltndx, pbtype,
1321 			    NAME(_lmp), (caddr_t)value,
1322 			    (caddr_t)symdef->st_value, name, binfo));
1323 		}
1324 	}
1325 
1326 	/*
1327 	 * Free up any items on the pltpadlist, if any were allocated.
1328 	 */
1329 	if (pltpadlist.head) {
1330 		Listnode *	lnp;
1331 		Listnode *	plnp;
1332 		Pltpadinfo *	pip;
1333 
1334 		plnp = 0;
1335 		for (LIST_TRAVERSE(&pltpadlist, lnp, pip)) {
1336 			if (plnp != 0)
1337 				free(plnp);
1338 			free(pip);
1339 			plnp = lnp;
1340 		}
1341 		if (plnp != 0)
1342 			free(plnp);
1343 	}
1344 
1345 	return (relocate_finish(lmp, bound, textrel, ret));
1346 }
1347 
1348 /*
1349  * Provide a machine specific interface to the conversion routine.  By calling
1350  * the machine specific version, rather than the generic version, we ensure that
1351  * the data tables/strings for all known machine versions aren't dragged into
1352  * ld.so.1.
1353  */
1354 const char *
1355 _conv_reloc_type_str(uint_t rel)
1356 {
1357 	return (conv_reloc_SPARC_type_str(rel));
1358 }
1359