/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * SPARC machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_SPARC.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"


extern void	iflush_range(caddr_t, size_t);
extern void	plt_full_range(uintptr_t, uintptr_t);


int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_machine != EM_SPARC) {
		if (ehdr->e_machine != EM_SPARC32PLUS) {
			rej->rej_type = SGS_REJ_MACH;
			rej->rej_info = (uint_t)ehdr->e_machine;
			return (0);
		}
		if ((ehdr->e_flags & EF_SPARC_32PLUS) == 0) {
			rej->rej_type = SGS_REJ_MISFLAG;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
		if ((ehdr->e_flags & ~at_flags) & EF_SPARC_32PLUS_MASK) {
			rej->rej_type = SGS_REJ_BADFLAG;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
	} else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

/*
 * elf_plt_write() will test to see how far away our destination
 *	address lies.  If it is close enough that a branch can
 *	be used instead of a jmpl - we will fill the plt in with
 *	a single branch.  The branches are much quicker than
 *	a jmpl instruction - see bug#4356879 for further
 *	details.
 *
 *	NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
 *		librtld/dldump update PLTs whose physical
 *		address is not the same as the 'virtual' runtime
 *		address.
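 *
 *	The displacement tested below is computed relative to the second
 *	word of the PLT entry (vpltaddr + 4), as that is where the branch
 *	instruction itself is written (pltent[1]).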
 */
Pltbindtype
/* ARGSUSED4 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
	Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	vpltaddr, pltaddr;
	long		disp;


	pltaddr = addr + rel->r_offset;
	vpltaddr = vaddr + rel->r_offset;
	disp = symval - vpltaddr - 4;

	/*
	 * Test if the destination address is close enough to use
	 * a ba,a... instruction to reach it.
	 */
	if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
		uint_t		*pltent, bainstr;
		Pltbindtype	rc;

		pltent = (uint_t *)pltaddr;
		/*
		 * The
		 *
		 *	ba,a,pt %icc, <dest>
		 *
		 * is the most efficient of the PLTs.  If we
		 * are within +-20 bits *and* running on a
		 * v8plus architecture - use that branch.
		 */
		if ((at_flags & EF_SPARC_32PLUS) &&
		    S_INRANGE(disp, 20)) {
			bainstr = M_BA_A_PT;	/* ba,a,pt %icc,<dest> */
			bainstr |= (S_MASK(19) & (disp >> 2));
			rc = PLT_T_21D;
			DBG_CALL(pltcnt21d++);
		} else {
			/*
			 * Otherwise - we fall back to the good old
			 *
			 *	ba,a <dest>
			 *
			 * which still beats a jmpl instruction.
			 */
			bainstr = M_BA_A;	/* ba,a <dest> */
			bainstr |= (S_MASK(22) & (disp >> 2));
			rc = PLT_T_24D;
			DBG_CALL(pltcnt24d++);
		}

		pltent[2] = M_NOP;		/* nop instr */
		pltent[1] = bainstr;

		iflush_range((char *)(&pltent[1]), 4);
		pltent[0] = M_NOP;		/* nop instr */
		iflush_range((char *)(&pltent[0]), 4);
		return (rc);
	}

	/*
	 * The PLT destination is not in reach of
	 * a branch instruction - so we fall back
	 * to a 'jmpl' sequence.
	 */
	plt_full_range(pltaddr, symval);
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}


/*
 * Local storage space created on the stack for this glue
 * code includes space for:
 *		0x4	pointer to dyn_data
 *		0x4	size prev stack frame
 */
static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x80, 0x90, 0x00, 0x1e,	/* tst   %fp */
/* 0x04 */	0x02, 0x80, 0x00, 0x04,	/* be    0x14 */
/* 0x08 */	0x82, 0x27, 0x80, 0x0e,	/* sub   %sp, %fp, %g1 */
/* 0x0c */	0x10, 0x80, 0x00, 0x03,	/* ba    0x20 */
/* 0x10 */	0x01, 0x00, 0x00, 0x00,	/* nop */
/* 0x14 */	0x82, 0x10, 0x20, 0x60,	/* mov   0x60, %g1 */
/* 0x18 */	0x9d, 0xe3, 0xbf, 0x98,	/* save  %sp, -0x68, %sp */
/* 0x1c */	0xc2, 0x27, 0xbf, 0xf8,	/* st    %g1, [%fp + -0x8] */
/* 0x20 */	0x03, 0x00, 0x00, 0x00,	/* sethi %hi(val), %g1 */
/* 0x24 */	0x82, 0x10, 0x60, 0x00,	/* or    %g1, %lo(val), %g1 */
/* 0x28 */	0x40, 0x00, 0x00, 0x00,	/* call  <rel_addr> */
/* 0x2c */	0xc2, 0x27, 0xbf, 0xfc	/* st    %g1, [%fp + -0x4] */
};

int	dyn_plt_ent_size = sizeof (dyn_plt_template) +
	sizeof (uintptr_t) +	/* reflmp */
	sizeof (uintptr_t) +	/* deflmp */
	sizeof (ulong_t) +	/* symndx */
	sizeof (ulong_t) +	/* sb_flags */
	sizeof (Sym);		/* symdef */
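
/*
 * Each audited PLT entry gets its own copy of the glue code above followed
 * by its dyn data; elf_plt_trace_write() locates the copy for PLT index
 * pltndx at AUDINFO(rlmp)->ai_dynplts + (pltndx * dyn_plt_ent_size).
 */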

/*
 * the dynamic plt entry is:
 *
 *	tst	%fp
 *	be	1f
 *	nop
 *	sub	%sp, %fp, %g1
 *	ba	2f
 *	nop
 * 1:
 *	mov	SA(MINFRAME), %g1	! if %fp is null this is the
 *					!   'minimum stack'.  %fp is null
 *					!   on the initial stack frame
 * 2:
 *	save	%sp, -(SA(MINFRAME) + 2 * CLONGSIZE), %sp
 *	st	%g1, [%fp + -0x8]	! store prev_stack size in [%fp - 8]
 *	sethi	%hi(dyn_data), %g1
 *	or	%g1, %lo(dyn_data), %g1
 *	call	elf_plt_trace
 *	st	%g1, [%fp + -0x4]	! store dyn_data ptr in [%fp - 4]
 *
 * dyn data:
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	ulong_t		symndx
 *	ulong_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(caddr_t addr, Rela *rptr, Rt_map *rlmp, Rt_map *dlmp,
    Sym *sym, ulong_t symndx, ulong_t pltndx, caddr_t to, ulong_t sb_flags,
    int *fail)
{
	extern ulong_t	elf_plt_trace();
	uintptr_t	dyn_plt, *dyndata;

	/*
	 * If both pltenter & pltexit have been disabled there
	 * is no reason to even create the glue code.
	 */
	if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
	    (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
		(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
		    rptr, (uintptr_t)to, pltndx);
		return (to);
	}

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size);

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't,
	 * do it now.  Otherwise this function has been called before, but
	 * from a different plt (i.e. from another shared object).  In that
	 * case we just set the plt to point to the new dyn_plt.
	 */
	if (*(uint_t *)dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)(dyn_plt + sizeof (dyn_plt_template));

		/*
		 * relocating:
		 *	sethi	%hi(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc_rtld(R_SPARC_HI22, (uchar_t *)(dyn_plt + 0x20),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	or	%g1, %lo(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc_rtld(R_SPARC_LO10, (uchar_t *)(dyn_plt + 0x24),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	call	elf_plt_trace
		 */
		symvalue = (Xword)((uintptr_t)&elf_plt_trace -
		    (dyn_plt + 0x28));
		if (do_reloc_rtld(R_SPARC_WDISP30, (uchar_t *)(dyn_plt + 0x28),
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*(ulong_t *)dyndata++ = symndx;
		*(ulong_t *)dyndata++ = sb_flags;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_name += (Word)STRTAB(dlmp);
		symp->st_value = (Addr)to;

		iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
	}

	(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
	    rptr, (uintptr_t)dyn_plt, 0);
	return ((caddr_t)dyn_plt);
}


/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the address of the PLT entry where the call originated,
 * the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes the process to terminate with a signal.
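 *
 * The PLT offset handed to us by the assembly interface is converted to a
 * PLT/relocation index as (pltoff - M_PLT_RESERVSZ) / M_PLT_ENTSIZE; the
 * reserved PLT entries precede the first bindable entry, and the relocation
 * entries in JMPREL are in the same order as the PLT entries.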
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, vaddr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*sym, *nsym;
	Xword		pltndx;
	uint_t		binfo, sb_flags = 0;
	Slookup		sl;
	Pltbindtype	pbtype;
	int		entry, lmflags;
	uint_t		dbg_class;
	Lm_list		*lml = LIST(lmp);

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter();

	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Must calculate true plt relocation address from reloc.
	 * Take offset, subtract number of reserved PLT entries, and divide
	 * by PLT entry size, which should give the index of the plt
	 * entry (and relocation entry since they have been defined to be
	 * in the same order).  Then we must multiply by the size of
	 * a relocation entry, which will give us the offset of the
	 * plt relocation entry from the start of them given by JMPREL(lm).
	 */
	addr = pltoff - M_PLT_RESERVSZ;
	pltndx = addr / M_PLT_ENTSIZE;

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map
	 * or the plt offset is invalid then it's possible someone has walked
	 * over the plt entries or jumped to plt0 out of the blue.
	 */
	if (!lmp || ((addr % M_PLT_ENTSIZE) != 0)) {
		Conv_inv_buf_t	inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_SPARC_type(R_SPARC_JMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltoff), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * sizeof (Rela);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + sym->st_name);

	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.
	 */
	sl.sl_name = name;
	sl.sl_cmap = lmp;
	sl.sl_imap = lml->lm_head;
	sl.sl_hash = 0;
	sl.sl_rsymndx = rsymndx;
	sl.sl_flags = LKUP_DEFT;

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
		ulong_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (FLAGS(lmp) & FLG_RT_FIXED)
		vaddr = 0;
	else
		vaddr = ADDR(lmp);

	pbtype = PLT_T_NONE;
	if (!(rtld_flags & RT_FL_NOBIND)) {
		if (((lml->lm_tflags | FLAGS1(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			ulong_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
			    rptr, lmp, nlmp, nsym, symndx, pltndx,
			    (caddr_t)symval, sb_flags, &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			pbtype = elf_plt_write((uintptr_t)vaddr,
			    (uintptr_t)vaddr, rptr, symval, pltndx);
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    pltndx, pbtype, nlmp, (Addr)symval, nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * If the object we've bound to is in the process of being initialized
	 * by another thread, determine whether we should block.
	 */
	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}


/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
	ulong_t		roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
	ulong_t		emap, dsymndx, pltndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		textrel = 0, ret = 1, noplt = 0;
	long		relacount = RELACOUNT(lmp);
	Rela		*rel;
	Pltbindtype	pbtype;
	uint_t		binfo, pbinfo;
	Alist		*bound = 0;

	/*
	 * If an object has any DT_REGISTER entries associated with
	 * it, they are processed now.
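	 *
	 * A non-zero 'plt' argument indicates that we have been called to
	 * process only the .plt relocations (see the RTLD_LAZY to RTLD_NOW
	 * promotion handling below); the DT_REGISTER and PLT initialization
	 * steps that follow are skipped in that case.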
	 */
	if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
		if (elf_regsyms(lmp) == 0)
			return (0);
	}

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * procedure linkage table entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		if ((ulong_t)PLTGOT(lmp) < etext) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0)
				return (0);
			textrel = 1;
		}
		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 */
	if (plt) {
		relbgn = pltbgn;
		relend = pltend;
	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL
		 *	and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations, the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);
	emap = ADDR(lmp) + MSIZE(lmp);

	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing in lazy mode there is no need to scan the
	 * .rela.plt table.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
		noplt = 1;

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		Addr		vaddr;
		uint_t		sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).  Only make
		 * the jump if we've been given a hint on the number of
		 * relocations.
		 */
		if ((rtype == R_SPARC_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			/*
			 * It's possible that the relative relocation block
			 * has relocations against the text segment as well
			 * as the data segment.  Since our optimized relocation
			 * engine does not check which segment the relocation
			 * is against - just mprotect it now if it's been
			 * marked as containing TEXTRELs.
			 */
			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
					ret = 0;
					break;
				}
				textrel = 1;
			}
			if (relacount) {
				relbgn = elf_reloc_relacount(relbgn, relacount,
				    relsiz, basebgn);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, etext, emap);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);

		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_SPARC_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		if (rtype != R_SPARC_REGISTER) {
			/*
			 * If this is a shared object, add the base address
			 * to offset.
			 */
			if (!(FLAGS(lmp) & FLG_RT_FIXED))
				roffset += basebgn;

			/*
			 * If this relocation is not against part of the image
			 * mapped into memory we skip it.
			 */
			if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
			    MSIZE(lmp)))) {
				elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
				    rsymndx);
				continue;
			}
		}

		/*
		 * If we're promoting .plts try and determine if this one has
		 * already been written.  An uninitialized .plt's second
		 * instruction is a branch.  Note, elf_plt_write() optimizes
		 * .plt relocations, and it's possible that a relocated entry
		 * is a branch.  If this is the case, we can't tell the
		 * difference between an uninitialized .plt and a relocated
		 * .plt that uses a branch.  In this case, we'll simply redo
		 * the relocation calculation, which is a bit sad.
		 */
		if (plt) {
			ulong_t	*_roffset = (ulong_t *)roffset;

			_roffset++;
			if ((*_roffset & (~(S_MASK(22)))) != M_BA_A)
				continue;
		}

		binfo = 0;
		pltndx = (ulong_t)-1;
		pbtype = PLT_T_NONE;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base
			 * address.  (we should have no local relocations in
			 * the executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_SPARC_TLS_DTPMOD32) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if (rtype == R_SPARC_TLS_TPOFF32) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is the same as that of
				 * the previous relocation we processed, then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol; our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
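				 *
				 * The cached psymndx, pvalue, pname, psymdef,
				 * psymref, plmp and pbinfo values are saved at
				 * the end of the lookup path below, once a
				 * definition has been found.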
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_SPARC_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					uchar_t		bind;

					/*
					 * Lookup the symbol definition.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					sl.sl_name = name;
					sl.sl_cmap = lmp;
					sl.sl_imap = 0;
					sl.sl_hash = 0;
					sl.sl_rsymndx = rsymndx;

					if (rtype == R_SPARC_COPY)
						sl.sl_flags = LKUP_COPY;
					else
						sl.sl_flags = LKUP_DEFT;

					sl.sl_flags |= LKUP_ALLCNTLIST;

					if (rtype != R_SPARC_JMP_SLOT)
						sl.sl_flags |= LKUP_SPEC;

					/*
					 * Under ldd -w, any unresolved weak
					 * references are diagnosed.  Set the
					 * symbol binding as global to trigger
					 * a relocation error if the symbol can
					 * not be found.
					 */
					if (LIST(lmp)->lm_flags &
					    LML_FLG_TRC_NOUNRESWEAK) {
						bind = STB_GLOBAL;
					} else if ((bind =
					    ELF_ST_BIND(symref->st_info)) ==
					    STB_WEAK) {
						sl.sl_flags |= LKUP_WEAK;
					}

					symdef = lookup_sym(&sl, &_lmp, &binfo);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    Lm_list	*lml = LIST(lmp);

					    if (bind != STB_WEAK) {
						if (lml->lm_flags &
						    LML_FLG_IGNRELERR) {
						    continue;
						} else if (lml->lm_flags &
						    LML_FLG_TRC_WARN) {
						    (void) printf(MSG_INTL(
							MSG_LDD_SYM_NFOUND),
							demangle(name),
							NAME(lmp));
						    continue;
						} else {
						    DBG_CALL(Dbg_reloc_in(lml,
							ELF_DBG_RTLD, M_MACH,
							M_REL_SHT_TYPE, rel,
							NULL, name));
						    eprintf(lml, ERR_FATAL,
							MSG_INTL(MSG_REL_NOSYM),
							NAME(lmp),
							demangle(name));
						    ret = 0;
						    break;
						}
					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (alist_test(&bound, _lmp,
						    sizeof (Rt_map *),
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for
					 * the subsequent relocations.
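					 *
					 * COPY relocations are deliberately
					 * excluded from this cache (see the
					 * R_SPARC_COPY test below, and the
					 * earlier comment regarding copy
					 * relocation symbols).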
					 */
					if (rtype != R_SPARC_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_SPARC_TLS_DTPMOD32) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if (rtype == R_SPARC_TLS_TPOFF32) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_SPARC_REGISTER) {
				/*
				 * A register symbol associated with symbol
				 * index 0 is initialized (i.e. relocated) to
				 * a constant in the r_addend field rather than
				 * to a symbol value.
				 */
				value = 0;

			} else if (rtype == R_SPARC_TLS_DTPMOD32) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;
			name = (char *)0;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));

		/*
		 * If this object has relocations in the text segment, turn
		 * off the write protect.
		 */
		if ((rtype != R_SPARC_REGISTER) && (roffset < etext) &&
		    (textrel == 0)) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
				ret = 0;
				break;
			}
			textrel = 1;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_SPARC_REGISTER:
			/*
			 * The v9 ABI 4.2.4 says that system objects may,
			 * but are not required to, use register symbols
			 * to indicate how they use global registers.  Thus
			 * at least %g6, %g7 must be allowed in addition
			 * to %g2 and %g3.
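			 *
			 * For R_SPARC_REGISTER relocations r_offset holds the
			 * register number (STO_SPARC_REGISTER_G1 through
			 * STO_SPARC_REGISTER_G7) rather than a memory address,
			 * which is why it was not biased by the base address
			 * above.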
			 */
			value += reladd;
			if (roffset == STO_SPARC_REGISTER_G1) {
				set_sparc_g1(value);
			} else if (roffset == STO_SPARC_REGISTER_G2) {
				set_sparc_g2(value);
			} else if (roffset == STO_SPARC_REGISTER_G3) {
				set_sparc_g3(value);
			} else if (roffset == STO_SPARC_REGISTER_G4) {
				set_sparc_g4(value);
			} else if (roffset == STO_SPARC_REGISTER_G5) {
				set_sparc_g5(value);
			} else if (roffset == STO_SPARC_REGISTER_G6) {
				set_sparc_g6(value);
			} else if (roffset == STO_SPARC_REGISTER_G7) {
				set_sparc_g7(value);
			} else {
				eprintf(LIST(lmp), ERR_FATAL,
				    MSG_INTL(MSG_REL_BADREG), NAME(lmp),
				    EC_ADDR(roffset));
				ret = 0;
				break;
			}

			DBG_CALL(Dbg_reloc_apply_reg(LIST(lmp), ELF_DBG_RTLD,
			    M_MACH, (Xword)roffset, (Xword)value));
			break;
		case R_SPARC_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_SPARC_JMP_SLOT:
			pltndx = ((ulong_t)rel -
			    (uintptr_t)JMPREL(lmp)) / relsiz;

			if (FLAGS(lmp) & FLG_RT_FIXED)
				vaddr = 0;
			else
				vaddr = ADDR(lmp);

			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				ulong_t	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write((caddr_t)vaddr,
				    (Rela *)rel, lmp, _lmp, symdef, symndx,
				    pltndx, (caddr_t)value, sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				pbtype = elf_plt_write((uintptr_t)vaddr,
				    (uintptr_t)vaddr, (void *)rel, value,
				    pltndx);
			}
			break;
		default:
			value += reladd;

			/*
			 * Write the relocation out.  If this relocation is a
			 * common basic write, skip the doreloc() engine.
			 */
			if ((rtype == R_SPARC_GLOB_DAT) ||
			    (rtype == R_SPARC_32)) {
				if (roffset & 0x3) {
					Conv_inv_buf_t	inv_buf;

					eprintf(LIST(lmp), ERR_FATAL,
					    MSG_INTL(MSG_REL_NONALIGN),
					    conv_reloc_SPARC_type(rtype,
					    0, &inv_buf),
					    NAME(lmp), demangle(name),
					    EC_OFF(roffset));
					ret = 0;
				} else
					*(uint_t *)roffset += value;
			} else {
				if (do_reloc_rtld(rtype, (uchar_t *)roffset,
				    (Xword *)&value, name,
				    NAME(lmp), LIST(lmp)) == 0)
					ret = 0;
			}

			/*
			 * The value now contains the 'bit-shifted' value that
			 * was or'ed into memory (this was set by
			 * do_reloc_rtld()).
			 */
			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));

			/*
			 * If this relocation is against a text segment, make
			 * sure that the instruction cache is flushed.
			 */
			if (textrel)
				iflush_range((caddr_t)roffset, 0x4);
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), pltndx, pbtype,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_SPARC_type(rel, 0, &inv_buf));
}