/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SPARC V9 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_SPARC.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"

extern void	iflush_range(caddr_t, size_t);
extern void	plt_upper_32(uintptr_t, uintptr_t);
extern void	plt_upper_44(uintptr_t, uintptr_t);
extern void	plt_full_range(uintptr_t, uintptr_t);
extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);
extern void	elf_rtbndr_far(Rt_map *, ulong_t, caddr_t);


int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags & EF_SPARC_EXT_MASK) {
		/*
		 * Check vendor-specific extensions.
		 */
		if (ehdr->e_flags & EF_SPARC_HAL_R1) {
			rej->rej_type = SGS_REJ_HAL;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
		if ((ehdr->e_flags & EF_SPARC_SUN_US3) & ~at_flags) {
			rej->rej_type = SGS_REJ_US3;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}

		/*
		 * Generic check.
		 * All of our 64-bit SPARCs support the US1 (UltraSPARC 1)
		 * instructions, so that bit isn't worth checking for
		 * explicitly.
		 */
		if ((ehdr->e_flags & EF_SPARC_EXT_MASK) & ~at_flags) {
			rej->rej_type = SGS_REJ_BADFLAG;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
	} else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}


void
ldso_plt_init(Rt_map * lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp)) {
		Xword pltoff;

		/*
		 * Install the lm pointer in .PLT2 as per the ABI.
		 */
		pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
		elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);

		/*
		 * The V9 ABI states that the first 32k PLT entries
		 * use .PLT1, with .PLT0 used by the "latter" entries.
		 * We don't currently implement the extended format,
		 * so install an error handler in .PLT0 to catch anyone
		 * trying to use it.
		 */
		elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);

		/*
		 * Initialize .PLT1
		 */
		pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
		elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
	}
}

/*
 * elf_plt_write() will test to see how far away our destination
 * address lies.  If it is close enough that a branch can
 * be used instead of a jmpl - we will fill the plt in with
 * a single branch.  The branches are much quicker than
 * a jmpl instruction - see bug#4356879 for further
 * details.
 *
 * NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
 *	librtld/dldump update PLTs whose physical
 *	address is not the same as the 'virtual' runtime
 *	address.
 */
Pltbindtype
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
	Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	nsym = ~symval;
	uintptr_t	vpltaddr, pltaddr;
	long		disp;


	pltaddr = addr + rel->r_offset;
	vpltaddr = vaddr + rel->r_offset;
	disp = symval - vpltaddr - 4;

	if (pltndx >= (M64_PLT_NEARPLTS - M_PLT_XNumber)) {
		*((Sxword *)pltaddr) = (uintptr_t)symval +
		    (uintptr_t)rel->r_addend - vaddr;
		DBG_CALL(pltcntfar++);
		return (PLT_T_FAR);
	}

	/*
	 * Test if the destination address is close enough to use
	 * a ba,a... instruction to reach it.
	 */
	if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
		uint_t		*pltent, bainstr;
		Pltbindtype	rc;

		pltent = (uint_t *)pltaddr;
		/*
		 * The
		 *
		 *	ba,a,pt %icc, <dest>
		 *
		 * is the most efficient of the PLT forms.  If we
		 * are within +-20 bits - use that branch.
		 */
		if (S_INRANGE(disp, 20)) {
			bainstr = M_BA_A_PT;	/* ba,a,pt %icc,<dest> */
			/* LINTED */
			bainstr |= (uint_t)(S_MASK(19) & (disp >> 2));
			rc = PLT_T_21D;
			DBG_CALL(pltcnt21d++);
		} else {
			/*
			 * Otherwise - we fall back to the good old
			 *
			 *	ba,a	<dest>
			 *
			 * which still beats a jmpl instruction.
			 */
			bainstr = M_BA_A;	/* ba,a <dest> */
			/* LINTED */
			bainstr |= (uint_t)(S_MASK(22) & (disp >> 2));
			rc = PLT_T_24D;
			DBG_CALL(pltcnt24d++);
		}

		pltent[2] = M_NOP;	/* nop instr */
		pltent[1] = bainstr;

		iflush_range((char *)(&pltent[1]), 4);
		pltent[0] = M_NOP;	/* nop instr */
		iflush_range((char *)(&pltent[0]), 4);
		return (rc);
	}

	if ((nsym >> 32) == 0) {
		plt_upper_32(pltaddr, symval);
		DBG_CALL(pltcntu32++);
		return (PLT_T_U32);
	}

	if ((nsym >> 44) == 0) {
		plt_upper_44(pltaddr, symval);
		DBG_CALL(pltcntu44++);
		return (PLT_T_U44);
	}

	/*
	 * The PLT destination is not in reach of
	 * a branch instruction - so we fall back
	 * to a 'jmpl' sequence.
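	 *
	 * (The preceding plt_upper_32()/plt_upper_44() cases cover
	 * destinations whose upper address bits are all ones, as the
	 * complement tests above imply; this final case has
	 * plt_full_range() materialize the complete 64-bit address
	 * before transferring control.  The exact instruction sequences
	 * live in the accompanying assembly routines - the description
	 * here is only a summary.)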
	 */
	plt_full_range(pltaddr, symval);
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Local storage space created on the stack for this glue
 * code includes space for:
 *	0x8	pointer to dyn_data
 *	0x8	size prev stack frame
 */
static const Byte dyn_plt_template[] = {
/* 0x0 */	0x2a, 0xcf, 0x80, 0x03,	/* brnz,a,pt %fp, 0xc */
/* 0x4 */	0x82, 0x27, 0x80, 0x0e,	/* sub %fp, %sp, %g1 */
/* 0x8 */	0x82, 0x10, 0x20, 0xb0,	/* mov 176, %g1 */
/* 0xc */	0x9d, 0xe3, 0xbf, 0x40,	/* save %sp, -192, %sp */
/* 0x10 */	0xc2, 0x77, 0xa7, 0xef,	/* stx %g1, [%fp + 2031] */
/* 0x14 */	0x0b, 0x00, 0x00, 0x00,	/* sethi %hh(dyn_data), %g5 */
/* 0x18 */	0x8a, 0x11, 0x60, 0x00,	/* or %g5, %hm(dyn_data), %g5 */
/* 0x1c */	0x8b, 0x29, 0x70, 0x20,	/* sllx %g5, 32, %g5 */
/* 0x20 */	0x03, 0x00, 0x00, 0x00,	/* sethi %lm(dyn_data), %g1 */
/* 0x24 */	0x82, 0x10, 0x60, 0x00,	/* or %g1, %lo(dyn_data), %g1 */
/* 0x28 */	0x82, 0x10, 0x40, 0x05,	/* or %g1, %g5, %g1 */
/* 0x2c */	0x40, 0x00, 0x00, 0x00,	/* call <rel_addr> */
/* 0x30 */	0xc2, 0x77, 0xa7, 0xf7,	/* stx %g1, [%fp + 2039] */
/* 0x34 */	0x01, 0x00, 0x00, 0x00	/* nop ! for 8-byte alignment */
};


int	dyn_plt_ent_size = sizeof (dyn_plt_template) +
	sizeof (Addr) +		/* reflmp */
	sizeof (Addr) +		/* deflmp */
	sizeof (Word) +		/* symndx */
	sizeof (Word) +		/* sb_flags */
	sizeof (Sym);		/* symdef */


/*
 * the dynamic plt entry is:
 *
 *	brnz,a,pt	%fp, 1f
 *	sub		%fp, %sp, %g1
 *	mov		SA(MINFRAME), %g1
 * 1:
 *	save		%sp, -(SA(MINFRAME) + (2 * CLONGSIZE)), %sp
 *
 *	! store prev stack size
 *	stx		%g1, [%fp + STACK_BIAS - (2 * CLONGSIZE)]
 *
 *	sethi		%hh(dyn_data), %g5
 *	or		%g5, %hm(dyn_data), %g5
 *	sllx		%g5, 32, %g5
 *	sethi		%lm(dyn_data), %g1
 *	or		%g1, %lo(dyn_data), %g1
 *	or		%g1, %g5, %g1
 *
 *	! store dyn_data ptr and call
 *	call		elf_plt_trace
 *	stx		%g1, [%fp + STACK_BIAS - CLONGSIZE]
 *	nop
 *
 * dyn data:
 *	Addr		reflmp
 *	Addr		deflmp
 *	Word		symndx
 *	Word		sb_flags
 *	Sym		symdef		(Elf64_Sym = 24 bytes)
 */
static caddr_t
elf_plt_trace_write(caddr_t addr, Rela * rptr, Rt_map * rlmp, Rt_map * dlmp,
    Sym * sym, uint_t symndx, ulong_t pltndx, caddr_t to, uint_t sb_flags,
    int *fail)
{
	extern ulong_t	elf_plt_trace();
	Addr		dyn_plt, *dyndata;

	/*
	 * If both pltenter & pltexit have been disabled there
	 * is no reason to even create the glue code.
	 */
	if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
	    (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
		(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
		    rptr, (uintptr_t)to, pltndx);
		return (to);
	}

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (Xword)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size);

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't,
	 * do it now.  Otherwise this function has been called before, but
	 * from a different plt (i.e. from another shared object).  In that
	 * case we just set the plt to point to the new dyn_plt.
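	 *
	 * (Each entry in ai_dynplts occupies dyn_plt_ent_size bytes: the
	 * 56-byte instruction template above followed by the reflmp,
	 * deflmp, symndx, sb_flags and symdef dyn_data fields - roughly
	 * 104 bytes per entry for sparcv9.)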
	 */
	if (*(Word *)dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (Addr *)(dyn_plt + sizeof (dyn_plt_template));

		/*
		 * relocating:
		 *	sethi	%hh(dyndata), %g5
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc(R_SPARC_HH22, (Byte *)(dyn_plt + 0x14),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	or	%g5, %hm(dyndata), %g5
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc(R_SPARC_HM10, (Byte *)(dyn_plt + 0x18),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	sethi	%lm(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc(R_SPARC_LM22, (Byte *)(dyn_plt + 0x20),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	or	%g1, %lo(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc(R_SPARC_LO10, (Byte *)(dyn_plt + 0x24),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	call	elf_plt_trace
		 */
		symvalue = (Xword)((Addr)&elf_plt_trace -
		    (Addr)(dyn_plt + 0x2c));
		if (do_reloc(R_SPARC_WDISP30, (Byte *)(dyn_plt + 0x2c),
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (Addr)rlmp;
		*dyndata++ = (Addr)dlmp;

		/*
		 * symndx in the high word, sb_flags in the low.
		 */
		*dyndata = (Addr)sb_flags;
		*(Word *)dyndata = symndx;
		dyndata++;

		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
		iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
	}

	(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr, rptr,
	    (uintptr_t)dyn_plt, pltndx);
	return ((caddr_t)dyn_plt);
}

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the address of the PLT entry where the call originated,
 * the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */

ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	Addr		addr, vaddr, reloff, symval;
	char		*name;
	Rela		*rptr;
	Sym		*sym, *nsym;
	Xword		pltndx;
	uint_t		binfo, sb_flags = 0;
	ulong_t		rsymndx;
	Slookup		sl;
	Pltbindtype	pbtype;
	int		entry, lmflags, farplt = 0;
	uint_t		dbg_class;
	Lm_list		*lml = LIST(lmp);

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter();

	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Must calculate true plt relocation address from reloc.
	 * Take offset, subtract number of reserved PLT entries, and divide
	 * by PLT entry size, which should give the index of the plt
	 * entry (and relocation entry since they have been defined to be
	 * in the same order).  Then we must multiply by the size of
	 * a relocation entry, which will give us the offset of the
	 * plt relocation entry from the start of them given by JMPREL(lm).
	 */
	addr = pltoff - M_PLT_RESERVSZ;

	if (pltoff < (M64_PLT_NEARPLTS * M_PLT_ENTSIZE)) {
		pltndx = addr / M_PLT_ENTSIZE;
	} else {
		ulong_t	pltblockoff;

		pltblockoff = pltoff - (M64_PLT_NEARPLTS * M_PLT_ENTSIZE);
		pltndx = M64_PLT_NEARPLTS +
		    ((pltblockoff / M64_PLT_FBLOCKSZ) * M64_PLT_FBLKCNTS) +
		    ((pltblockoff % M64_PLT_FBLOCKSZ) / M64_PLT_FENTSIZE) -
		    M_PLT_XNumber;
		farplt = 1;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map
	 * or the plt offset is invalid then it's possible someone has walked
	 * over the plt entries or jumped to plt[01] out of the blue.
	 */
	if (!lmp || (!farplt && (addr % M_PLT_ENTSIZE) != 0) ||
	    (farplt && (addr % M_PLT_INSSIZE))) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_SPARC_type(R_SPARC_JMP_SLOT),
		    EC_NATPTR(lmp), EC_XWORD(pltoff), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * sizeof (Rela);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + sym->st_name);

	/*
	 * Determine the last link-map of this list; this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.
	 */
	sl.sl_name = name;
	sl.sl_cmap = lmp;
	sl.sl_imap = lml->lm_head;
	sl.sl_hash = 0;
	sl.sl_rsymndx = rsymndx;
	sl.sl_flags = LKUP_DEFT;
	if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
		/* LINTED */
		uint_t	symndx = (uint_t)(((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (FLAGS(lmp) & FLG_RT_FIXED)
		vaddr = 0;
	else
		vaddr = ADDR(lmp);

	pbtype = PLT_T_NONE;
	if (!(rtld_flags & RT_FL_NOBIND)) {
		if (((lml->lm_tflags | FLAGS1(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			/* LINTED */
			uint_t	symndx = (uint_t)(((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
			    rptr, lmp, nlmp, nsym, symndx, pltndx,
			    (caddr_t)symval, sb_flags, &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			pbtype = elf_plt_write((uintptr_t)vaddr,
			    (uintptr_t)vaddr, rptr, symval, pltndx);
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)pltndx, pbtype, nlmp, (Addr)symval, nsym->st_value,
	    name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp, lmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp, 0);

	/*
	 * If the object we've bound to is in the process of being initialized
	 * by another thread, determine whether we should block.
	 */
	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(LIST(lmp));
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}


static int
bindpltpad(Rt_map *lmp, List *padlist, Addr value, void **pltaddr,
    const char *fname, const char *sname)
{
	Listnode	*lnp, *prevlnp;
	Pltpadinfo	*pip;
	void		*plt;
	uintptr_t	pltoff;
	Rela		rel;
	int		i;

	prevlnp = 0;
	for (LIST_TRAVERSE(padlist, lnp, pip)) {
		if (pip->pp_addr == value) {
			*pltaddr = pip->pp_plt;
			DBG_CALL(Dbg_bind_pltpad_from(lmp, (Addr)*pltaddr,
			    sname));
			return (1);
		}
		if (pip->pp_addr > value)
			break;
		prevlnp = lnp;
	}

	plt = PLTPAD(lmp);
	pltoff = (uintptr_t)plt - (uintptr_t)ADDR(lmp);

	PLTPAD(lmp) = (void *)((uintptr_t)PLTPAD(lmp) + M_PLT_ENTSIZE);

	if (PLTPAD(lmp) > PLTPADEND(lmp)) {
		/*
		 * Just fail in the usual relocation way.
		 */
		*pltaddr = (void *)value;
		return (1);
	}
	rel.r_offset = pltoff;
	rel.r_info = 0;
	rel.r_addend = 0;


	/*
	 * elf_plt_write assumes the plt was previously filled
	 * with NOPs, so fill it in now.
	 */
	for (i = 0; i < (M_PLT_ENTSIZE / sizeof (uint_t)); i++) {
		((uint_t *)plt)[i] = M_NOP;
	}
	iflush_range((caddr_t)plt, M_PLT_ENTSIZE);

	(void) elf_plt_write(ADDR(lmp), ADDR(lmp), &rel, value, 0);

	if ((pip = calloc(sizeof (Pltpadinfo), 1)) == 0)
		return (0);
	pip->pp_addr = value;
	pip->pp_plt = plt;

	if (prevlnp)
		lnp = list_insert(padlist, pip, prevlnp);
	else
		lnp = list_prepend(padlist, pip);

	if (!lnp) {
		free(pip);
		return (0);
	}

	*pltaddr = plt;
	DBG_CALL(Dbg_bind_pltpad_to(lmp, (Addr)*pltaddr, fname, sname));
	return (1);
}

/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
	ulong_t		roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
	ulong_t		emap, pltndx;
	uint_t		dsymndx, binfo, pbinfo;
	Byte		rtype;
	long		reladd;
	Addr		value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		textrel = 0, ret = 1, noplt = 0;
	long		relacount = RELACOUNT(lmp);
	Rela		*rel;
	Pltbindtype	pbtype;
	List		pltpadlist = {0, 0};
	Alist		*bound = 0;

	/*
	 * If an object has any DT_REGISTER entries associated with
	 * it, they are processed now.
	 */
	if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
		if (elf_regsyms(lmp) == 0)
			return (0);
	}

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * procedure linkage table entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		Xword pltoff;

		if ((ulong_t)PLTGOT(lmp) < etext) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0)
				return (0);
			textrel = 1;
		}

		/*
		 * Install the lm pointer in .PLT2 as per the ABI.
		 */
		pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
		elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);

		/*
		 * The V9 ABI states that the first 32k PLT entries
		 * use .PLT1, with .PLT0 used by the "latter" entries.
		 * We don't currently implement the extended format,
		 * so install an error handler in .PLT0 to catch anyone
		 * trying to use it.
		 */
		elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);

		/*
		 * Initialize .PLT1
		 */
		pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
		elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 */
	if (plt) {
		relbgn = pltbgn;
		relend = pltend;
	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL
		 *	and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);
	emap = ADDR(lmp) + MSIZE(lmp);

	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing in lazy mode there is no need to scan the
	 * .rela.plt table.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
		noplt = 1;

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		Addr		vaddr;
		uint_t		sb_flags = 0;

		rtype = (Byte)ELF_R_TYPE(((Rela *)relbgn)->r_info);

		/*
		 * If this is a RELATIVE relocation in a shared object
		 * (the common case), and if we are not debugging, then
		 * jump into a tighter relocation loop (elf_reloc_relacount).
		 * Only make the jump if we've been given a hint on the
		 * number of relocations.
		 */
		if ((rtype == R_SPARC_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			/*
			 * It's possible that the relative relocation block
			 * has relocations against the text segment as well
			 * as the data segment.  Since our optimized relocation
			 * engine does not check which segment the relocation
			 * is against - just mprotect it now if it's been
			 * marked as containing TEXTRELs.
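			 *
			 * (In essence the optimized loops below store
			 * base + addend at each RELATIVE entry's offset
			 * without any per-entry text/data distinction,
			 * which is why the whole object is unprotected
			 * up front.)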
			 */
			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
					ret = 0;
					break;
				}
				textrel = 1;
			}
			if (relacount) {
				relbgn = elf_reloc_relacount(relbgn, relacount,
				    relsiz, basebgn);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, etext, emap);
			}
			if (relbgn >= relend)
				break;
			rtype = (Byte)ELF_R_TYPE(((Rela *)relbgn)->r_info);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);

		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_SPARC_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		if (rtype != R_SPARC_REGISTER) {
			/*
			 * If this is a shared object, add the base address
			 * to offset.
			 */
			if (!(FLAGS(lmp) & FLG_RT_FIXED))
				roffset += basebgn;

			/*
			 * If this relocation is not against part of the image
			 * mapped into memory we skip it.
			 */
			if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
			    MSIZE(lmp)))) {
				elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
				    rsymndx);
				continue;
			}
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.  An uninitialized plt's second instruction
		 * is a branch.
		 */
		if (plt) {
			uchar_t	*_roffset = (uchar_t *)roffset;

			_roffset += M_PLT_INSSIZE;
			/* LINTED */
			if ((*(uint_t *)_roffset &
			    (~(S_MASK(19)))) != M_BA_A_XCC)
				continue;
		}

		binfo = 0;
		pltndx = (ulong_t)-1;
		pbtype = PLT_T_NONE;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * TLS relocation - value for DTPMOD relocation
				 * is the TLS modid.
				 */
				if (rtype == M_R_DTPMOD)
					value = TLSMODID(lmp);
			} else {
				/*
				 * If the symbol index is equal to the previous
				 * symbol index relocation we processed then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol; our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_SPARC_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					uchar_t		bind;

					/*
					 * Lookup the symbol definition.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					sl.sl_name = name;
					sl.sl_cmap = lmp;
					sl.sl_imap = 0;
					sl.sl_hash = 0;
					sl.sl_rsymndx = rsymndx;

					if (rtype == R_SPARC_COPY)
						sl.sl_flags = LKUP_COPY;
					else
						sl.sl_flags = LKUP_DEFT;

					sl.sl_flags |= LKUP_ALLCNTLIST;

					if (rtype != R_SPARC_JMP_SLOT)
						sl.sl_flags |= LKUP_SPEC;

					bind = ELF_ST_BIND(symref->st_info);
					if (bind == STB_WEAK)
						sl.sl_flags |= LKUP_WEAK;

					symdef = lookup_sym(&sl, &_lmp, &binfo);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND)
					 */
					if (symdef == 0) {
						Lm_list	*lml = LIST(lmp);

						if (bind != STB_WEAK) {
							if (lml->lm_flags &
							    LML_FLG_IGNRELERR) {
								continue;
							} else if (lml->lm_flags &
							    LML_FLG_TRC_WARN) {
								(void) printf(MSG_INTL(
								    MSG_LDD_SYM_NFOUND),
								    demangle(name),
								    NAME(lmp));
								continue;
							} else {
								eprintf(lml, ERR_FATAL,
								    MSG_INTL(MSG_REL_NOSYM),
								    NAME(lmp),
								    demangle(name));
								ret = 0;
								break;
							}
						} else {
							psymndx = rsymndx;
							psymdef = 0;

							DBG_CALL(Dbg_bind_weak(lmp,
							    (Addr)roffset, (Addr)
							    (roffset - basebgn), name));
							continue;
						}
					}

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (alist_test(&bound, _lmp,
						    sizeof (Rt_map *),
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					value = symdef->st_value;
					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_SPARC_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						/* LINTED */
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
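				 *
				 * (R_SPARC_WDISP30, the 30-bit pc-relative
				 * call displacement used for the PLTPAD
				 * case below, is one such relocation.)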
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * TLS relocation - value for DTPMOD relocation
				 * is the TLS modid.
				 */
				if (rtype == M_R_DTPMOD)
					value = TLSMODID(_lmp);
				else if (rtype == M_R_TPOFF)
					value = -(TLSSTATOFF(_lmp) - value);
			}
		} else {
			/*
			 * Special cases: a register symbol associated with
			 * symbol index 0 is initialized (i.e. relocated) to
			 * a constant in the r_addend field rather than to a
			 * symbol value.
			 *
			 * A DTPMOD relocation is a local binding to a TLS
			 * symbol.  Fill in the TLSMODID for the current object.
			 */
			if (rtype == R_SPARC_REGISTER)
				value = 0;
			else if (rtype == M_R_DTPMOD)
				value = TLSMODID(lmp);
			else
				value = basebgn;
			name = (char *)0;
		}

		/*
		 * If this object has relocations in the text segment, turn
		 * off the write protect.
		 */
		if ((rtype != R_SPARC_REGISTER) && (roffset < etext) &&
		    (textrel == 0)) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
				ret = 0;
				break;
			}
			textrel = 1;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));

		switch (rtype) {
		case R_SPARC_REGISTER:
			/*
			 * The v9 ABI 4.2.4 says that system objects may,
			 * but are not required to, use register symbols
			 * to indicate how they use global registers.  Thus
			 * at least %g6, %g7 must be allowed in addition
			 * to %g2 and %g3.
			 */
			value += reladd;
			if (roffset == STO_SPARC_REGISTER_G1) {
				set_sparc_g1(value);
			} else if (roffset == STO_SPARC_REGISTER_G2) {
				set_sparc_g2(value);
			} else if (roffset == STO_SPARC_REGISTER_G3) {
				set_sparc_g3(value);
			} else if (roffset == STO_SPARC_REGISTER_G4) {
				set_sparc_g4(value);
			} else if (roffset == STO_SPARC_REGISTER_G5) {
				set_sparc_g5(value);
			} else if (roffset == STO_SPARC_REGISTER_G6) {
				set_sparc_g6(value);
			} else if (roffset == STO_SPARC_REGISTER_G7) {
				set_sparc_g7(value);
			} else {
				eprintf(LIST(lmp), ERR_FATAL,
				    MSG_INTL(MSG_REL_BADREG), NAME(lmp),
				    EC_ADDR(roffset));
				ret = 0;
				break;
			}

			DBG_CALL(Dbg_reloc_apply_reg(LIST(lmp), ELF_DBG_RTLD,
			    M_MACH, (Xword)roffset, (Xword)value));
			break;
		case R_SPARC_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_SPARC_JMP_SLOT:
			pltndx = ((uintptr_t)rel -
			    (uintptr_t)JMPREL(lmp)) / relsiz;

			if (FLAGS(lmp) & FLG_RT_FIXED)
				vaddr = 0;
			else
				vaddr = ADDR(lmp);

			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				/* LINTED */
				uint_t	symndx = (uint_t)(((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) /
				    SYMENT(_lmp));

				(void) elf_plt_trace_write((caddr_t)vaddr,
				    (Rela *)rel, lmp, _lmp, symdef, symndx,
				    pltndx, (caddr_t)value, sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				pbtype = elf_plt_write((uintptr_t)vaddr,
				    (uintptr_t)vaddr, (void *)rel, value,
				    pltndx);
			}
			break;
		case R_SPARC_WDISP30:
			if (PLTPAD(lmp) &&
			    (S_INRANGE((Sxword)value, 29) == 0)) {
				void	*plt = 0;

				if (bindpltpad(lmp, &pltpadlist,
				    value + roffset, &plt,
				    NAME(_lmp), name) == 0) {
					ret = 0;
					break;
				}
				value = (Addr)((Addr)plt - roffset);
			}
			/* FALLTHROUGH */
		default:
			value += reladd;
			if (IS_EXTOFFSET(rtype))
				value += (Word)ELF_R_TYPE_DATA(rel->r_info);

			/*
			 * Write the relocation out.  If this relocation is a
			 * common basic write, skip the doreloc() engine.
			 */
			if ((rtype == R_SPARC_GLOB_DAT) ||
			    (rtype == R_SPARC_64)) {
				if (roffset & 0x7) {
					eprintf(LIST(lmp), ERR_FATAL,
					    MSG_INTL(MSG_REL_NONALIGN),
					    conv_reloc_SPARC_type(rtype),
					    NAME(lmp), demangle(name),
					    EC_OFF(roffset));
					ret = 0;
				} else
					*(ulong_t *)roffset += value;
			} else {
				if (do_reloc(rtype, (uchar_t *)roffset,
				    (Xword *)&value, name,
				    NAME(lmp), LIST(lmp)) == 0)
					ret = 0;
			}

			/*
			 * The value now contains the 'bit-shifted' value that
			 * was or'ed into memory (this was set by do_reloc()).
			 */
			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));

			/*
			 * If this relocation is against a text segment, make
			 * sure that the instruction cache is flushed.
			 */
			if (textrel)
				iflush_range((caddr_t)roffset, 0x4);
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), pltndx, pbtype,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	/*
	 * Free up any items on the pltpadlist if it was allocated.
	 */
	if (pltpadlist.head) {
		Listnode	*lnp;
		Listnode	*plnp;
		Pltpadinfo	*pip;

		plnp = 0;
		for (LIST_TRAVERSE(&pltpadlist, lnp, pip)) {
			if (plnp != 0)
				free(plnp);
			free(pip);
			plnp = lnp;
		}
		if (plnp != 0)
			free(plnp);
	}

	return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	return (conv_reloc_SPARC_type(rel));
}