1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 */
26
27 /*
28 * SPARC V9 machine dependent and ELF file class dependent functions.
29 * Contains routines for performing function binding and symbol relocations.
30 */
31
32 #include <stdio.h>
33 #include <sys/elf.h>
34 #include <sys/elf_SPARC.h>
35 #include <sys/mman.h>
36 #include <dlfcn.h>
37 #include <synch.h>
38 #include <string.h>
39 #include <debug.h>
40 #include <reloc.h>
41 #include <conv.h>
42 #include "_rtld.h"
43 #include "_audit.h"
44 #include "_elf.h"
45 #include "_inline_gen.h"
46 #include "_inline_reloc.h"
47 #include "msg.h"
48
49 extern void iflush_range(caddr_t, size_t);
50 extern void plt_upper_32(uintptr_t, uintptr_t);
51 extern void plt_upper_44(uintptr_t, uintptr_t);
52 extern void plt_full_range(uintptr_t, uintptr_t);
53 extern void elf_rtbndr(Rt_map *, ulong_t, caddr_t);
54 extern void elf_rtbndr_far(Rt_map *, ulong_t, caddr_t);
55
56 int
elf_mach_flags_check(Rej_desc * rej,Ehdr * ehdr)57 elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
58 {
59 /*
60 * Check machine type and flags.
61 */
62 if (ehdr->e_flags & EF_SPARC_EXT_MASK) {
63 /*
64 * Check vendor-specific extensions.
65 */
66 if (ehdr->e_flags & EF_SPARC_HAL_R1) {
67 rej->rej_type = SGS_REJ_HAL;
68 rej->rej_info = (uint_t)ehdr->e_flags;
69 return (0);
70 }
71 if ((ehdr->e_flags & EF_SPARC_SUN_US3) & ~at_flags) {
72 rej->rej_type = SGS_REJ_US3;
73 rej->rej_info = (uint_t)ehdr->e_flags;
74 return (0);
75 }
76
77 /*
78 * Generic check.
79 * All of our 64-bit SPARC's support the US1 (UltraSPARC 1)
80 * instructions so that bit isn't worth checking for explicitly.
81 */
82 if ((ehdr->e_flags & EF_SPARC_EXT_MASK) & ~at_flags) {
83 rej->rej_type = SGS_REJ_BADFLAG;
84 rej->rej_info = (uint_t)ehdr->e_flags;
85 return (0);
86 }
87 } else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
88 rej->rej_type = SGS_REJ_BADFLAG;
89 rej->rej_info = (uint_t)ehdr->e_flags;
90 return (0);
91 }
92 return (1);
93 }
94
95
96 void
ldso_plt_init(Rt_map * lmp)97 ldso_plt_init(Rt_map *lmp)
98 {
99 /*
100 * There is no need to analyze ld.so because we don't map in any of
101 * its dependencies. However we may map these dependencies in later
102 * (as if ld.so had dlopened them), so initialize the plt and the
103 * permission information.
104 */
105 if (PLTGOT(lmp)) {
106 Xword pltoff;
107
108 /*
109 * Install the lm pointer in .PLT2 as per the ABI.
110 */
111 pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
112 elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);
113
114 /*
115 * The V9 ABI states that the first 32k PLT entries
116 * use .PLT1, with .PLT0 used by the "latter" entries.
117 * We don't currently implement the extendend format,
118 * so install an error handler in .PLT0 to catch anyone
119 * trying to use it.
120 */
121 elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);
122
123 /*
124 * Initialize .PLT1
125 */
126 pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
127 elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
128 }
129 }
130
131 /*
132 * elf_plt_write() will test to see how far away our destination
133 * address lies. If it is close enough that a branch can
134 * be used instead of a jmpl - we will fill the plt in with
135 * single branch. The branches are much quicker then
136 * a jmpl instruction - see bug#4356879 for further
137 * details.
138 *
139 * NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
140 * librtld/dldump update PLT's who's physical
141 * address is not the same as the 'virtual' runtime
142 * address.
143 */
144 Pltbindtype
elf_plt_write(uintptr_t addr,uintptr_t vaddr,void * rptr,uintptr_t symval,Xword pltndx)145 elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
146 Xword pltndx)
147 {
148 Rela *rel = (Rela *)rptr;
149 uintptr_t nsym = ~symval;
150 uintptr_t vpltaddr, pltaddr;
151 long disp;
152
153
154 pltaddr = addr + rel->r_offset;
155 vpltaddr = vaddr + rel->r_offset;
156 disp = symval - vpltaddr - 4;
157
158 if (pltndx >= (M64_PLT_NEARPLTS - M_PLT_XNumber)) {
159 *((Sxword *)pltaddr) = (uintptr_t)symval +
160 (uintptr_t)rel->r_addend - vaddr;
161 DBG_CALL(pltcntfar++);
162 return (PLT_T_FAR);
163 }
164
165 /*
166 * Test if the destination address is close enough to use
167 * a ba,a... instruction to reach it.
168 */
169 if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
170 uint_t *pltent, bainstr;
171 Pltbindtype rc;
172
173 pltent = (uint_t *)pltaddr;
174
175 /*
176 * The
177 *
178 * ba,a,pt %icc, <dest>
179 *
180 * is the most efficient of the PLT's. If we
181 * are within +-20 bits - use that branch.
182 */
183 if (S_INRANGE(disp, 20)) {
184 bainstr = M_BA_A_PT; /* ba,a,pt %icc,<dest> */
185 /* LINTED */
186 bainstr |= (uint_t)(S_MASK(19) & (disp >> 2));
187 rc = PLT_T_21D;
188 DBG_CALL(pltcnt21d++);
189 } else {
190 /*
191 * Otherwise - we fall back to the good old
192 *
193 * ba,a <dest>
194 *
195 * Which still beats a jmpl instruction.
196 */
197 bainstr = M_BA_A; /* ba,a <dest> */
198 /* LINTED */
199 bainstr |= (uint_t)(S_MASK(22) & (disp >> 2));
200 rc = PLT_T_24D;
201 DBG_CALL(pltcnt24d++);
202 }
203
204 pltent[2] = M_NOP; /* nop instr */
205 pltent[1] = bainstr;
206
207 iflush_range((char *)(&pltent[1]), 4);
208 pltent[0] = M_NOP; /* nop instr */
209 iflush_range((char *)(&pltent[0]), 4);
210 return (rc);
211 }
212
213 if ((nsym >> 32) == 0) {
214 plt_upper_32(pltaddr, symval);
215 DBG_CALL(pltcntu32++);
216 return (PLT_T_U32);
217 }
218
219 if ((nsym >> 44) == 0) {
220 plt_upper_44(pltaddr, symval);
221 DBG_CALL(pltcntu44++);
222 return (PLT_T_U44);
223 }
224
225 /*
226 * The PLT destination is not in reach of
227 * a branch instruction - so we fall back
228 * to a 'jmpl' sequence.
229 */
230 plt_full_range(pltaddr, symval);
231 DBG_CALL(pltcntfull++);
232 return (PLT_T_FULL);
233 }
234
235 /*
236 * Once relocated, the following 6 instruction sequence moves
237 * a 64-bit immediate value into register %g1
238 */
239 #define VAL64_TO_G1 \
240 /* 0x00 */ 0x0b, 0x00, 0x00, 0x00, /* sethi %hh(value), %g5 */ \
241 /* 0x04 */ 0x8a, 0x11, 0x60, 0x00, /* or %g5, %hm(value), %g5 */ \
242 /* 0x08 */ 0x8b, 0x29, 0x70, 0x20, /* sllx %g5, 32, %g5 */ \
243 /* 0x0c */ 0x03, 0x00, 0x00, 0x00, /* sethi %lm(value), %g1 */ \
244 /* 0x10 */ 0x82, 0x10, 0x60, 0x00, /* or %g1, %lo(value), %g1 */ \
245 /* 0x14 */ 0x82, 0x10, 0x40, 0x05 /* or %g1, %g5, %g1 */
246
247 /*
248 * Local storage space created on the stack created for this glue
249 * code includes space for:
250 * 0x8 pointer to dyn_data
251 * 0x8 size prev stack frame
252 */
253 static const Byte dyn_plt_template[] = {
254 /* 0x0 */ 0x2a, 0xcf, 0x80, 0x03, /* brnz,a,pt %fp, 0xc */
255 /* 0x4 */ 0x82, 0x27, 0x80, 0x0e, /* sub %fp, %sp, %g1 */
256 /* 0x8 */ 0x82, 0x10, 0x20, 0xb0, /* mov 176, %g1 */
257 /* 0xc */ 0x9d, 0xe3, 0xbf, 0x40, /* save %sp, -192, %sp */
258 /* 0x10 */ 0xc2, 0x77, 0xa7, 0xef, /* stx %g1, [%fp + 2031] */
259
260 /* store prev stack size */
261 /* 0x14 */ VAL64_TO_G1, /* dyn_data to g1 */
262 /* 0x2c */ 0xc2, 0x77, 0xa7, 0xf7, /* stx %g1, [%fp + 2039] */
263
264 /* 0x30 */ VAL64_TO_G1, /* elf_plt_trace() addr to g1 */
265
266 /* Call to elf_plt_trace() via g1 */
267 /* 0x48 */ 0x9f, 0xc0, 0x60, 0x00, /* jmpl ! link r[15] to addr in g1 */
268 /* 0x4c */ 0x01, 0x00, 0x00, 0x00 /* nop ! for jmpl delay slot *AND* */
269 /* to get 8-byte alignment */
270 };
271
272 int dyn_plt_ent_size = sizeof (dyn_plt_template) +
273 sizeof (Addr) + /* reflmp */
274 sizeof (Addr) + /* deflmp */
275 sizeof (Word) + /* symndx */
276 sizeof (Word) + /* sb_flags */
277 sizeof (Sym); /* symdef */
278
279 /*
280 * the dynamic plt entry is:
281 *
282 * brnz,a,pt %fp, 1f
283 * sub %sp, %fp, %g1
284 * mov SA(MINFRAME), %g1
285 * 1:
286 * save %sp, -(SA(MINFRAME) + (2 * CLONGSIZE)), %sp
287 *
288 * ! store prev stack size
289 * stx %g1, [%fp + STACK_BIAS - (2 * CLONGSIZE)]
290 *
291 * 2:
292 * ! move dyn_data to %g1
293 * sethi %hh(dyn_data), %g5
294 * or %g5, %hm(dyn_data), %g5
295 * sllx %g5, 32, %g5
296 * sethi %lm(dyn_data), %g1
297 * or %g1, %lo(dyn_data), %g1
298 * or %g1, %g5, %g1
299 *
300 * ! store dyn_data ptr on frame (from %g1)
301 * stx %g1, [%fp + STACK_BIAS - CLONGSIZE]
302 *
303 * ! Move address of elf_plt_trace() into %g1
304 * [Uses same 6 instructions as shown at label 2: above. Not shown.]
305 *
306 * ! Use JMPL to make call. CALL instruction is limited to 30-bits.
307 * ! of displacement.
308 * jmp1 %g1, %o7
309 *
310 * ! JMPL has a delay slot that must be filled. And, the sequence
311 * ! of instructions needs to have 8-byte alignment. This NOP does both.
312 * ! The alignment is needed for the data we put following the
313 * ! instruction.
314 * nop
315 *
316 * dyn data:
317 * Addr reflmp
318 * Addr deflmp
319 * Word symndx
320 * Word sb_flags
321 * Sym symdef (Elf64_Sym = 24-bytes)
322 */
323
324 /*
325 * Relocate the instructions given by the VAL64_TO_G1 macro above.
326 * The arguments parallel those of do_reloc_rtld().
327 *
328 * entry:
329 * off - Address of 1st instruction in sequence.
330 * value - Value being relocated (addend)
331 * sym - Name of value being relocated.
332 * lml - link map list
333 *
334 * exit:
335 * Returns TRUE for success, FALSE for failure.
336 */
337 static int
reloc_val64_to_g1(uchar_t * off,Addr * value,const char * sym,Lm_list * lml)338 reloc_val64_to_g1(uchar_t *off, Addr *value, const char *sym, Lm_list *lml)
339 {
340 Xword tmp_value;
341
342 /*
343 * relocating:
344 * sethi %hh(value), %g5
345 */
346 tmp_value = (Xword)value;
347 if (do_reloc_rtld(R_SPARC_HH22, off, &tmp_value, sym,
348 MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
349 return (0);
350 }
351
352 /*
353 * relocating:
354 * or %g5, %hm(value), %g5
355 */
356 tmp_value = (Xword)value;
357 if (do_reloc_rtld(R_SPARC_HM10, off + 4, &tmp_value, sym,
358 MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
359 return (0);
360 }
361
362 /*
363 * relocating:
364 * sethi %lm(value), %g1
365 */
366 tmp_value = (Xword)value;
367 if (do_reloc_rtld(R_SPARC_LM22, off + 12, &tmp_value, sym,
368 MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
369 return (0);
370 }
371
372 /*
373 * relocating:
374 * or %g1, %lo(value), %g1
375 */
376 tmp_value = (Xword)value;
377 if (do_reloc_rtld(R_SPARC_LO10, off + 16, &tmp_value, sym,
378 MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
379 return (0);
380 }
381
382 return (1);
383 }
384
385 static caddr_t
elf_plt_trace_write(caddr_t addr,Rela * rptr,Rt_map * rlmp,Rt_map * dlmp,Sym * sym,uint_t symndx,ulong_t pltndx,caddr_t to,uint_t sb_flags,int * fail)386 elf_plt_trace_write(caddr_t addr, Rela *rptr, Rt_map *rlmp, Rt_map *dlmp,
387 Sym *sym, uint_t symndx, ulong_t pltndx, caddr_t to, uint_t sb_flags,
388 int *fail)
389 {
390 extern ulong_t elf_plt_trace();
391 uchar_t *dyn_plt;
392 uintptr_t *dyndata;
393
394 /*
395 * If both pltenter & pltexit have been disabled there
396 * there is no reason to even create the glue code.
397 */
398 if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
399 (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
400 (void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
401 rptr, (uintptr_t)to, pltndx);
402 return (to);
403 }
404
405 /*
406 * We only need to add the glue code if there is an auditing
407 * library that is interested in this binding.
408 */
409 dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
410 (pltndx * dyn_plt_ent_size));
411
412 /*
413 * Have we initialized this dynamic plt entry yet? If we haven't do it
414 * now. Otherwise this function has been called before, but from a
415 * different plt (ie. from another shared object). In that case
416 * we just set the plt to point to the new dyn_plt.
417 */
418 if (*dyn_plt == 0) {
419 Sym *symp;
420 Lm_list *lml = LIST(rlmp);
421
422 (void) memcpy((void *)dyn_plt, dyn_plt_template,
423 sizeof (dyn_plt_template));
424 dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
425 sizeof (dyn_plt_template));
426
427 /*
428 * relocating:
429 * VAL64_TO_G1(dyndata)
430 * VAL64_TO_G1(&elf_plt_trace)
431 */
432 if (!(reloc_val64_to_g1((dyn_plt + 0x14), dyndata,
433 MSG_ORIG(MSG_SYM_LADYNDATA), lml) &&
434 reloc_val64_to_g1((dyn_plt + 0x30), (Addr *)&elf_plt_trace,
435 MSG_ORIG(MSG_SYM_ELFPLTTRACE), lml))) {
436 *fail = 1;
437 return (0);
438 }
439
440 *dyndata++ = (Addr)rlmp;
441 *dyndata++ = (Addr)dlmp;
442
443 /*
444 * symndx in the high word, sb_flags in the low.
445 */
446 *dyndata = (Addr)sb_flags;
447 *(Word *)dyndata = symndx;
448 dyndata++;
449
450 symp = (Sym *)dyndata;
451 *symp = *sym;
452 symp->st_value = (Addr)to;
453 iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
454 }
455
456 (void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr, rptr,
457 (uintptr_t)dyn_plt, pltndx);
458 return ((caddr_t)dyn_plt);
459 }
460
461 /*
462 * Function binding routine - invoked on the first call to a function through
463 * the procedure linkage table;
464 * passes first through an assembly language interface.
465 *
466 * Takes the address of the PLT entry where the call originated,
467 * the offset into the relocation table of the associated
468 * relocation entry and the address of the link map (rt_private_map struct)
469 * for the entry.
470 *
471 * Returns the address of the function referenced after re-writing the PLT
472 * entry to invoke the function directly.
473 *
474 * On error, causes process to terminate with a signal.
475 */
476 ulong_t
elf_bndr(Rt_map * lmp,ulong_t pltoff,caddr_t from)477 elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
478 {
479 Rt_map *nlmp, *llmp;
480 Addr addr, vaddr, reloff, symval;
481 char *name;
482 Rela *rptr;
483 Sym *rsym, *nsym;
484 Xword pltndx;
485 uint_t binfo, sb_flags = 0, dbg_class;
486 ulong_t rsymndx;
487 Slookup sl;
488 Sresult sr;
489 Pltbindtype pbtype;
490 int entry, lmflags, farplt = 0;
491 Lm_list *lml;
492
493 /*
494 * For compatibility with libthread (TI_VERSION 1) we track the entry
495 * value. A zero value indicates we have recursed into ld.so.1 to
496 * further process a locking request. Under this recursion we disable
497 * tsort and cleanup activities.
498 */
499 entry = enter(0);
500
501 lml = LIST(lmp);
502 if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
503 dbg_class = dbg_desc->d_class;
504 dbg_desc->d_class = 0;
505 }
506
507 /*
508 * Must calculate true plt relocation address from reloc.
509 * Take offset, subtract number of reserved PLT entries, and divide
510 * by PLT entry size, which should give the index of the plt
511 * entry (and relocation entry since they have been defined to be
512 * in the same order). Then we must multiply by the size of
513 * a relocation entry, which will give us the offset of the
514 * plt relocation entry from the start of them given by JMPREL(lm).
515 */
516 addr = pltoff - M_PLT_RESERVSZ;
517
518 if (pltoff < (M64_PLT_NEARPLTS * M_PLT_ENTSIZE)) {
519 pltndx = addr / M_PLT_ENTSIZE;
520 } else {
521 ulong_t pltblockoff;
522
523 pltblockoff = pltoff - (M64_PLT_NEARPLTS * M_PLT_ENTSIZE);
524 pltndx = M64_PLT_NEARPLTS +
525 ((pltblockoff / M64_PLT_FBLOCKSZ) * M64_PLT_FBLKCNTS) +
526 ((pltblockoff % M64_PLT_FBLOCKSZ) / M64_PLT_FENTSIZE) -
527 M_PLT_XNumber;
528 farplt = 1;
529 }
530
531 /*
532 * Perform some basic sanity checks. If we didn't get a load map
533 * or the plt offset is invalid then its possible someone has walked
534 * over the plt entries or jumped to plt[01] out of the blue.
535 */
536 if (!lmp || (!farplt && (addr % M_PLT_ENTSIZE) != 0) ||
537 (farplt && (addr % M_PLT_INSSIZE))) {
538 Conv_inv_buf_t inv_buf;
539
540 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
541 conv_reloc_SPARC_type(R_SPARC_JMP_SLOT, 0, &inv_buf),
542 EC_NATPTR(lmp), EC_XWORD(pltoff), EC_NATPTR(from));
543 rtldexit(lml, 1);
544 }
545 reloff = pltndx * sizeof (Rela);
546
547 /*
548 * Use relocation entry to get symbol table entry and symbol name.
549 */
550 addr = (ulong_t)JMPREL(lmp);
551 rptr = (Rela *)(addr + reloff);
552 rsymndx = ELF_R_SYM(rptr->r_info);
553 rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
554 name = (char *)(STRTAB(lmp) + rsym->st_name);
555
556 /*
557 * Determine the last link-map of this list, this'll be the starting
558 * point for any tsort() processing.
559 */
560 llmp = lml->lm_tail;
561
562 /*
563 * Find definition for symbol. Initialize the symbol lookup, and symbol
564 * result, data structures.
565 */
566 SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
567 rsymndx, rsym, 0, LKUP_DEFT);
568 SRESULT_INIT(sr, name);
569
570 if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
571 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
572 demangle(name));
573 rtldexit(lml, 1);
574 }
575
576 name = (char *)sr.sr_name;
577 nlmp = sr.sr_dmap;
578 nsym = sr.sr_sym;
579
580 symval = nsym->st_value;
581
582 if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
583 (nsym->st_shndx != SHN_ABS))
584 symval += ADDR(nlmp);
585 if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
586 /*
587 * Record that this new link map is now bound to the caller.
588 */
589 if (bind_one(lmp, nlmp, BND_REFER) == 0)
590 rtldexit(lml, 1);
591 }
592
593 if ((lml->lm_tflags | AFLAGS(lmp) | AFLAGS(nlmp)) &
594 LML_TFLG_AUD_SYMBIND) {
595 /* LINTED */
596 uint_t symndx = (uint_t)(((uintptr_t)nsym -
597 (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
598
599 symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
600 &sb_flags);
601 }
602
603 if (FLAGS(lmp) & FLG_RT_FIXED)
604 vaddr = 0;
605 else
606 vaddr = ADDR(lmp);
607
608 pbtype = PLT_T_NONE;
609 if (!(rtld_flags & RT_FL_NOBIND)) {
610 if (((lml->lm_tflags | AFLAGS(lmp)) &
611 (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
612 AUDINFO(lmp)->ai_dynplts) {
613 int fail = 0;
614 /* LINTED */
615 uint_t symndx = (uint_t)(((uintptr_t)nsym -
616 (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
617
618 symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
619 rptr, lmp, nlmp, nsym, symndx, pltndx,
620 (caddr_t)symval, sb_flags, &fail);
621 if (fail)
622 rtldexit(lml, 1);
623 } else {
624 /*
625 * Write standard PLT entry to jump directly
626 * to newly bound function.
627 */
628 pbtype = elf_plt_write((uintptr_t)vaddr,
629 (uintptr_t)vaddr, rptr, symval, pltndx);
630 }
631 }
632
633 /*
634 * Print binding information and rebuild PLT entry.
635 */
636 DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
637 (Xword)pltndx, pbtype, nlmp, (Addr)symval, nsym->st_value,
638 name, binfo));
639
640 /*
641 * Complete any processing for newly loaded objects. Note we don't
642 * know exactly where any new objects are loaded (we know the object
643 * that supplied the symbol, but others may have been loaded lazily as
644 * we searched for the symbol), so sorting starts from the last
645 * link-map know on entry to this routine.
646 */
647 if (entry)
648 load_completion(llmp);
649
650 /*
651 * Some operations like dldump() or dlopen()'ing a relocatable object
652 * result in objects being loaded on rtld's link-map, make sure these
653 * objects are initialized also.
654 */
655 if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
656 load_completion(nlmp);
657
658 /*
659 * Make sure the object to which we've bound has had it's .init fired.
660 * Cleanup before return to user code.
661 */
662 if (entry) {
663 is_dep_init(nlmp, lmp);
664 leave(lml, 0);
665 }
666
667 if (lmflags & LML_FLG_RTLDLM)
668 dbg_desc->d_class = dbg_class;
669
670 return (symval);
671 }
672
673 static int
bindpltpad(Rt_map * lmp,Alist ** padlist,Addr value,void ** pltaddr,const char * fname,const char * sname)674 bindpltpad(Rt_map *lmp, Alist **padlist, Addr value, void **pltaddr,
675 const char *fname, const char *sname)
676 {
677 Aliste idx = 0;
678 Pltpadinfo ppi, *ppip;
679 void *plt;
680 uintptr_t pltoff;
681 Rela rel;
682 int i;
683
684 for (ALIST_TRAVERSE(*padlist, idx, ppip)) {
685 if (ppip->pp_addr == value) {
686 *pltaddr = ppip->pp_plt;
687 DBG_CALL(Dbg_bind_pltpad_from(lmp, (Addr)*pltaddr,
688 sname));
689 return (1);
690 }
691 if (ppip->pp_addr > value)
692 break;
693 }
694
695 plt = PLTPAD(lmp);
696 pltoff = (uintptr_t)plt - (uintptr_t)ADDR(lmp);
697
698 PLTPAD(lmp) = (void *)((uintptr_t)PLTPAD(lmp) + M_PLT_ENTSIZE);
699
700 if (PLTPAD(lmp) > PLTPADEND(lmp)) {
701 /*
702 * Just fail in usual relocation way
703 */
704 *pltaddr = (void *)value;
705 return (1);
706 }
707 rel.r_offset = pltoff;
708 rel.r_info = 0;
709 rel.r_addend = 0;
710
711 /*
712 * elf_plt_write assumes the plt was previously filled
713 * with NOP's, so fill it in now.
714 */
715 for (i = 0; i < (M_PLT_ENTSIZE / sizeof (uint_t)); i++) {
716 ((uint_t *)plt)[i] = M_NOP;
717 }
718 iflush_range((caddr_t)plt, M_PLT_ENTSIZE);
719
720 (void) elf_plt_write(ADDR(lmp), ADDR(lmp), &rel, value, 0);
721
722 ppi.pp_addr = value;
723 ppi.pp_plt = plt;
724
725 if (alist_insert(padlist, &ppi, sizeof (Pltpadinfo),
726 AL_CNT_PLTPAD, idx) == NULL)
727 return (0);
728
729 *pltaddr = plt;
730 DBG_CALL(Dbg_bind_pltpad_to(lmp, (Addr)*pltaddr, fname, sname));
731 return (1);
732 }
733
734 /*
735 * Read and process the relocations for one link object, we assume all
736 * relocation sections for loadable segments are stored contiguously in
737 * the file.
738 */
739 int
elf_reloc(Rt_map * lmp,uint_t plt,int * in_nfavl,APlist ** textrel)740 elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
741 {
742 ulong_t relbgn, relend, relsiz, basebgn, pltbgn, pltend;
743 ulong_t pltndx, roffset, rsymndx, psymndx = 0;
744 uint_t dsymndx, binfo, pbinfo;
745 uchar_t rtype;
746 long reladd;
747 Addr value, pvalue;
748 Sym *symref, *psymref, *symdef, *psymdef;
749 Syminfo *sip;
750 char *name, *pname;
751 Rt_map *_lmp, *plmp;
752 int ret = 1, noplt = 0;
753 long relacount = RELACOUNT(lmp);
754 Rela *rel;
755 Pltbindtype pbtype;
756 Alist *pltpadlist = NULL;
757 APlist *bound = NULL;
758
759 /*
760 * If an object has any DT_REGISTER entries associated with
761 * it, they are processed now.
762 */
763 if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
764 if (elf_regsyms(lmp) == 0)
765 return (0);
766 }
767
768 /*
769 * Although only necessary for lazy binding, initialize the first
770 * procedure linkage table entry to go to elf_rtbndr(). dbx(1) seems
771 * to find this useful.
772 */
773 if ((plt == 0) && PLTGOT(lmp)) {
774 mmapobj_result_t *mpp;
775 Xword pltoff;
776
777 /*
778 * Make sure the segment is writable.
779 */
780 if ((((mpp =
781 find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
782 ((mpp->mr_prot & PROT_WRITE) == 0)) &&
783 ((set_prot(lmp, mpp, 1) == 0) ||
784 (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
785 return (0);
786
787 /*
788 * Install the lm pointer in .PLT2 as per the ABI.
789 */
790 pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
791 elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);
792
793 /*
794 * The V9 ABI states that the first 32k PLT entries
795 * use .PLT1, with .PLT0 used by the "latter" entries.
796 * We don't currently implement the extendend format,
797 * so install an error handler in .PLT0 to catch anyone
798 * trying to use it.
799 */
800 elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);
801
802 /*
803 * Initialize .PLT1
804 */
805 pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
806 elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
807 }
808
809 /*
810 * Initialize the plt start and end addresses.
811 */
812 if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
813 pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));
814
815 /*
816 * If we've been called upon to promote an RTLD_LAZY object to an
817 * RTLD_NOW then we're only interested in scaning the .plt table.
818 */
819 if (plt) {
820 relbgn = pltbgn;
821 relend = pltend;
822 } else {
823 /*
824 * The relocation sections appear to the run-time linker as a
825 * single table. Determine the address of the beginning and end
826 * of this table. There are two different interpretations of
827 * the ABI at this point:
828 *
829 * o The REL table and its associated RELSZ indicate the
830 * concatenation of *all* relocation sections (this is the
831 * model our link-editor constructs).
832 *
833 * o The REL table and its associated RELSZ indicate the
834 * concatenation of all *but* the .plt relocations. These
835 * relocations are specified individually by the JMPREL and
836 * PLTRELSZ entries.
837 *
838 * Determine from our knowledege of the relocation range and
839 * .plt range, the range of the total relocation table. Note
840 * that one other ABI assumption seems to be that the .plt
841 * relocations always follow any other relocations, the
842 * following range checking drops that assumption.
843 */
844 relbgn = (ulong_t)(REL(lmp));
845 relend = relbgn + (ulong_t)(RELSZ(lmp));
846 if (pltbgn) {
847 if (!relbgn || (relbgn > pltbgn))
848 relbgn = pltbgn;
849 if (!relbgn || (relend < pltend))
850 relend = pltend;
851 }
852 }
853 if (!relbgn || (relbgn == relend)) {
854 DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
855 return (1);
856 }
857
858 relsiz = (ulong_t)(RELENT(lmp));
859 basebgn = ADDR(lmp);
860
861 DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));
862
863 /*
864 * If we're processing in lazy mode there is no need to scan the
865 * .rela.plt table.
866 */
867 if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
868 noplt = 1;
869
870 sip = SYMINFO(lmp);
871 /*
872 * Loop through relocations.
873 */
874 while (relbgn < relend) {
875 mmapobj_result_t *mpp;
876 uint_t sb_flags = 0;
877 Addr vaddr;
878
879 rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
880
881 /*
882 * If this is a RELATIVE relocation in a shared object
883 * (the common case), and if we are not debugging, then
884 * jump into a tighter relocaiton loop (elf_reloc_relacount)
885 * Only make the jump if we've been given a hint on the
886 * number of relocations.
887 */
888 if ((rtype == R_SPARC_RELATIVE) &&
889 ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
890 if (relacount) {
891 relbgn = elf_reloc_relative_count(relbgn,
892 relacount, relsiz, basebgn, lmp,
893 textrel, 0);
894 relacount = 0;
895 } else {
896 relbgn = elf_reloc_relative(relbgn, relend,
897 relsiz, basebgn, lmp, textrel, 0);
898 }
899 if (relbgn >= relend)
900 break;
901 rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
902 }
903
904 roffset = ((Rela *)relbgn)->r_offset;
905
906 reladd = (long)(((Rela *)relbgn)->r_addend);
907 rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
908 rel = (Rela *)relbgn;
909 relbgn += relsiz;
910
911 /*
912 * Optimizations.
913 */
914 if (rtype == R_SPARC_NONE)
915 continue;
916 if (noplt && ((ulong_t)rel >= pltbgn) &&
917 ((ulong_t)rel < pltend)) {
918 relbgn = pltend;
919 continue;
920 }
921
922 if (rtype != R_SPARC_REGISTER) {
923 /*
924 * If this is a shared object, add the base address
925 * to offset.
926 */
927 if (!(FLAGS(lmp) & FLG_RT_FIXED))
928 roffset += basebgn;
929
930 /*
931 * If this relocation is not against part of the image
932 * mapped into memory we skip it.
933 */
934 if ((mpp = find_segment((caddr_t)roffset,
935 lmp)) == NULL) {
936 elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
937 rsymndx);
938 continue;
939 }
940 }
941
942 /*
943 * If we're promoting plts, determine if this one has already
944 * been written. An uninitialized plts' second instruction is a
945 * branch.
946 */
947 if (plt) {
948 uchar_t *_roffset = (uchar_t *)roffset;
949
950 _roffset += M_PLT_INSSIZE;
951 /* LINTED */
952 if ((*(uint_t *)_roffset &
953 (~(S_MASK(19)))) != M_BA_A_XCC)
954 continue;
955 }
956
957 binfo = 0;
958 pltndx = (ulong_t)-1;
959 pbtype = PLT_T_NONE;
960
961 /*
962 * If a symbol index is specified then get the symbol table
963 * entry, locate the symbol definition, and determine its
964 * address.
965 */
966 if (rsymndx) {
967 /*
968 * If a Syminfo section is provided, determine if this
969 * symbol is deferred, and if so, skip this relocation.
970 */
971 if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
972 textrel, sip, rsymndx))
973 continue;
974
975 /*
976 * Get the local symbol table entry.
977 */
978 symref = (Sym *)((ulong_t)SYMTAB(lmp) +
979 (rsymndx * SYMENT(lmp)));
980
981 /*
982 * If this is a local symbol, just use the base address.
983 * (we should have no local relocations in the
984 * executable).
985 */
986 if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
987 value = basebgn;
988 name = NULL;
989
990 /*
991 * Special case TLS relocations.
992 */
993 if ((rtype == R_SPARC_TLS_DTPMOD32) ||
994 (rtype == R_SPARC_TLS_DTPMOD64)) {
995 /*
996 * Use the TLS modid.
997 */
998 value = TLSMODID(lmp);
999
1000 } else if ((rtype == R_SPARC_TLS_TPOFF32) ||
1001 (rtype == R_SPARC_TLS_TPOFF64)) {
1002 if ((value = elf_static_tls(lmp, symref,
1003 rel, rtype, 0, roffset, 0)) == 0) {
1004 ret = 0;
1005 break;
1006 }
1007 }
1008 } else {
1009 /*
1010 * If the symbol index is equal to the previous
1011 * symbol index relocation we processed then
1012 * reuse the previous values. (Note that there
1013 * have been cases where a relocation exists
1014 * against a copy relocation symbol, our ld(1)
1015 * should optimize this away, but make sure we
1016 * don't use the same symbol information should
1017 * this case exist).
1018 */
1019 if ((rsymndx == psymndx) &&
1020 (rtype != R_SPARC_COPY)) {
1021 /* LINTED */
1022 if (psymdef == 0) {
1023 DBG_CALL(Dbg_bind_weak(lmp,
1024 (Addr)roffset, (Addr)
1025 (roffset - basebgn), name));
1026 continue;
1027 }
1028 /* LINTED */
1029 value = pvalue;
1030 /* LINTED */
1031 name = pname;
1032 symdef = psymdef;
1033 /* LINTED */
1034 symref = psymref;
1035 /* LINTED */
1036 _lmp = plmp;
1037 /* LINTED */
1038 binfo = pbinfo;
1039
1040 if ((LIST(_lmp)->lm_tflags |
1041 AFLAGS(_lmp)) &
1042 LML_TFLG_AUD_SYMBIND) {
1043 value = audit_symbind(lmp, _lmp,
1044 /* LINTED */
1045 symdef, dsymndx, value,
1046 &sb_flags);
1047 }
1048 } else {
1049 Slookup sl;
1050 Sresult sr;
1051
1052 /*
1053 * Lookup the symbol definition.
1054 * Initialize the symbol lookup, and
1055 * symbol result, data structures.
1056 */
1057 name = (char *)(STRTAB(lmp) +
1058 symref->st_name);
1059
1060 SLOOKUP_INIT(sl, name, lmp, 0,
1061 ld_entry_cnt, 0, rsymndx, symref,
1062 rtype, LKUP_STDRELOC);
1063 SRESULT_INIT(sr, name);
1064 symdef = NULL;
1065
1066 if (lookup_sym(&sl, &sr, &binfo,
1067 in_nfavl)) {
1068 name = (char *)sr.sr_name;
1069 _lmp = sr.sr_dmap;
1070 symdef = sr.sr_sym;
1071 }
1072
1073 /*
1074 * If the symbol is not found and the
1075 * reference was not to a weak symbol,
1076 * report an error. Weak references
1077 * may be unresolved.
1078 */
1079 /* BEGIN CSTYLED */
1080 if (symdef == 0) {
1081 if (sl.sl_bind != STB_WEAK) {
1082 if (elf_reloc_error(lmp, name,
1083 rel, binfo))
1084 continue;
1085
1086 ret = 0;
1087 break;
1088
1089 } else {
1090 psymndx = rsymndx;
1091 psymdef = 0;
1092
1093 DBG_CALL(Dbg_bind_weak(lmp,
1094 (Addr)roffset, (Addr)
1095 (roffset - basebgn), name));
1096 continue;
1097 }
1098 }
1099 /* END CSTYLED */
1100
1101 /*
1102 * If symbol was found in an object
1103 * other than the referencing object
1104 * then record the binding.
1105 */
1106 if ((lmp != _lmp) && ((FLAGS1(_lmp) &
1107 FL1_RT_NOINIFIN) == 0)) {
1108 if (aplist_test(&bound, _lmp,
1109 AL_CNT_RELBIND) == 0) {
1110 ret = 0;
1111 break;
1112 }
1113 }
1114
1115 /*
1116 * Calculate the location of definition;
1117 * symbol value plus base address of
1118 * containing shared object.
1119 */
1120 if (IS_SIZE(rtype))
1121 value = symdef->st_size;
1122 else
1123 value = symdef->st_value;
1124
1125 if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
1126 !(IS_SIZE(rtype)) &&
1127 (symdef->st_shndx != SHN_ABS) &&
1128 (ELF_ST_TYPE(symdef->st_info) !=
1129 STT_TLS))
1130 value += ADDR(_lmp);
1131
1132 /*
1133 * Retain this symbol index and the
1134 * value in case it can be used for the
1135 * subsequent relocations.
1136 */
1137 if (rtype != R_SPARC_COPY) {
1138 psymndx = rsymndx;
1139 pvalue = value;
1140 pname = name;
1141 psymdef = symdef;
1142 psymref = symref;
1143 plmp = _lmp;
1144 pbinfo = binfo;
1145 }
1146 if ((LIST(_lmp)->lm_tflags |
1147 AFLAGS(_lmp)) &
1148 LML_TFLG_AUD_SYMBIND) {
1149 /* LINTED */
1150 dsymndx = (((uintptr_t)symdef -
1151 (uintptr_t)SYMTAB(_lmp)) /
1152 SYMENT(_lmp));
1153 value = audit_symbind(lmp, _lmp,
1154 symdef, dsymndx, value,
1155 &sb_flags);
1156 }
1157 }
1158
1159 /*
1160 * If relocation is PC-relative, subtract
1161 * offset address.
1162 */
1163 if (IS_PC_RELATIVE(rtype))
1164 value -= roffset;
1165
1166 /*
1167 * Special case TLS relocations.
1168 */
1169 if ((rtype == R_SPARC_TLS_DTPMOD32) ||
1170 (rtype == R_SPARC_TLS_DTPMOD64)) {
1171 /*
1172 * Relocation value is the TLS modid.
1173 */
1174 value = TLSMODID(_lmp);
1175
1176 } else if ((rtype == R_SPARC_TLS_TPOFF64) ||
1177 (rtype == R_SPARC_TLS_TPOFF32)) {
1178 if ((value = elf_static_tls(_lmp,
1179 symdef, rel, rtype, name, roffset,
1180 value)) == 0) {
1181 ret = 0;
1182 break;
1183 }
1184 }
1185 }
1186 } else {
1187 /*
1188 * Special cases.
1189 */
1190 if (rtype == R_SPARC_REGISTER) {
1191 /*
1192 * A register symbol associated with symbol
1193 * index 0 is initialized (i.e. relocated) to
1194 * a constant in the r_addend field rather than
1195 * to a symbol value.
1196 */
1197 value = 0;
1198
1199 } else if ((rtype == R_SPARC_TLS_DTPMOD32) ||
1200 (rtype == R_SPARC_TLS_DTPMOD64)) {
1201 /*
1202 * TLS relocation value is the TLS modid.
1203 */
1204 value = TLSMODID(lmp);
1205 } else
1206 value = basebgn;
1207
1208 name = NULL;
1209 }
1210
1211 DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
1212 M_REL_SHT_TYPE, rel, NULL, 0, name));
1213
1214 /*
1215 * Make sure the segment is writable.
1216 */
1217 if ((rtype != R_SPARC_REGISTER) &&
1218 ((mpp->mr_prot & PROT_WRITE) == 0) &&
1219 ((set_prot(lmp, mpp, 1) == 0) ||
1220 (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
1221 ret = 0;
1222 break;
1223 }
1224
1225 /*
1226 * Call relocation routine to perform required relocation.
1227 */
1228 switch (rtype) {
1229 case R_SPARC_REGISTER:
1230 /*
1231 * The v9 ABI 4.2.4 says that system objects may,
1232 * but are not required to, use register symbols
1233 		 * to indicate how they use global registers. Thus
1234 * at least %g6, %g7 must be allowed in addition
1235 * to %g2 and %g3.
1236 */
1237 value += reladd;
1238 if (roffset == STO_SPARC_REGISTER_G1) {
1239 set_sparc_g1(value);
1240 } else if (roffset == STO_SPARC_REGISTER_G2) {
1241 set_sparc_g2(value);
1242 } else if (roffset == STO_SPARC_REGISTER_G3) {
1243 set_sparc_g3(value);
1244 } else if (roffset == STO_SPARC_REGISTER_G4) {
1245 set_sparc_g4(value);
1246 } else if (roffset == STO_SPARC_REGISTER_G5) {
1247 set_sparc_g5(value);
1248 } else if (roffset == STO_SPARC_REGISTER_G6) {
1249 set_sparc_g6(value);
1250 } else if (roffset == STO_SPARC_REGISTER_G7) {
1251 set_sparc_g7(value);
1252 } else {
1253 eprintf(LIST(lmp), ERR_FATAL,
1254 MSG_INTL(MSG_REL_BADREG), NAME(lmp),
1255 EC_ADDR(roffset));
1256 ret = 0;
1257 break;
1258 }
1259
1260 DBG_CALL(Dbg_reloc_apply_reg(LIST(lmp), ELF_DBG_RTLD,
1261 M_MACH, (Xword)roffset, (Xword)value));
1262 break;
1263 case R_SPARC_COPY:
1264 if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
1265 symdef, _lmp, (const void *)value) == 0)
1266 ret = 0;
1267 break;
1268 case R_SPARC_JMP_SLOT:
1269 pltndx = ((uintptr_t)rel -
1270 (uintptr_t)JMPREL(lmp)) / relsiz;
1271
1272 if (FLAGS(lmp) & FLG_RT_FIXED)
1273 vaddr = 0;
1274 else
1275 vaddr = ADDR(lmp);
1276
1277 if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
1278 (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
1279 AUDINFO(lmp)->ai_dynplts) {
1280 int fail = 0;
1281 /* LINTED */
1282 uint_t symndx = (uint_t)(((uintptr_t)symdef -
1283 (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));
1284
1285 (void) elf_plt_trace_write((caddr_t)vaddr,
1286 (Rela *)rel, lmp, _lmp, symdef, symndx,
1287 pltndx, (caddr_t)value, sb_flags, &fail);
1288 if (fail)
1289 ret = 0;
1290 } else {
1291 /*
1292 * Write standard PLT entry to jump directly
1293 * to newly bound function.
1294 */
1295 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
1296 ELF_DBG_RTLD, (Xword)roffset,
1297 (Xword)value));
1298 pbtype = elf_plt_write((uintptr_t)vaddr,
1299 (uintptr_t)vaddr, (void *)rel, value,
1300 pltndx);
1301 }
1302 break;
1303 case R_SPARC_WDISP30:
1304 if (PLTPAD(lmp) &&
1305 (S_INRANGE((Sxword)value, 29) == 0)) {
1306 void * plt = 0;
1307
1308 if (bindpltpad(lmp, &pltpadlist,
1309 value + roffset, &plt,
1310 NAME(_lmp), name) == 0) {
1311 ret = 0;
1312 break;
1313 }
1314 value = (Addr)((Addr)plt - roffset);
1315 }
1316 /* FALLTHROUGH */
1317 default:
1318 value += reladd;
1319 if (IS_EXTOFFSET(rtype))
1320 value += (Word)ELF_R_TYPE_DATA(rel->r_info);
1321
1322 /*
1323 * Write the relocation out. If this relocation is a
1324 * common basic write, skip the doreloc() engine.
1325 */
1326 if ((rtype == R_SPARC_GLOB_DAT) ||
1327 (rtype == R_SPARC_64)) {
1328 if (roffset & 0x7) {
1329 Conv_inv_buf_t inv_buf;
1330
1331 eprintf(LIST(lmp), ERR_FATAL,
1332 MSG_INTL(MSG_REL_NONALIGN),
1333 conv_reloc_SPARC_type(rtype,
1334 0, &inv_buf),
1335 NAME(lmp), demangle(name),
1336 EC_OFF(roffset));
1337 ret = 0;
1338 } else
1339 *(ulong_t *)roffset += value;
1340 } else {
1341 if (do_reloc_rtld(rtype, (uchar_t *)roffset,
1342 (Xword *)&value, name,
1343 NAME(lmp), LIST(lmp)) == 0)
1344 ret = 0;
1345 }
1346
1347 /*
1348 * The value now contains the 'bit-shifted' value that
1349 * was or'ed into memory (this was set by
1350 * do_reloc_rtld()).
1351 */
1352 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
1353 (Xword)roffset, (Xword)value));
1354
1355 /*
1356 * If this relocation is against a text segment, make
1357 * sure that the instruction cache is flushed.
1358 */
1359 if (textrel)
1360 iflush_range((caddr_t)roffset, 0x4);
1361 }
1362
1363 if ((ret == 0) &&
1364 ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
1365 break;
1366
1367 if (binfo) {
1368 DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
1369 (Off)(roffset - basebgn), pltndx, pbtype,
1370 _lmp, (Addr)value, symdef->st_value, name, binfo));
1371 }
1372 }
1373
1374 /*
1375 * Free up any items on the pltpadlist if it was allocated
1376 */
1377 if (pltpadlist)
1378 free(pltpadlist);
1379
1380 return (relocate_finish(lmp, bound, ret));
1381 }
1382
1383 /*
1384 * Provide a machine specific interface to the conversion routine. By calling
1385  * the machine specific version, rather than the generic version, we ensure that
1386 * the data tables/strings for all known machine versions aren't dragged into
1387 * ld.so.1.
1388 */
1389 const char *
_conv_reloc_type(uint_t rel)1390 _conv_reloc_type(uint_t rel)
1391 {
1392 static Conv_inv_buf_t inv_buf;
1393
1394 return (conv_reloc_SPARC_type(rel, 0, &inv_buf));
1395 }
1396