/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 *	Copyright (c) 1988 AT&T
 *	  All Rights Reserved
 *
 *
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SPARC machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_SPARC.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"
#include	"debug.h"
#include	"reloc.h"
#include	"conv.h"


extern void	iflush_range(caddr_t, size_t);
extern void	plt_full_range(uintptr_t, uintptr_t);


int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_machine != EM_SPARC) {
		if (ehdr->e_machine != EM_SPARC32PLUS) {
			rej->rej_type = SGS_REJ_MACH;
			rej->rej_info = (uint_t)ehdr->e_machine;
			return (0);
		}
		if ((ehdr->e_flags & EF_SPARC_32PLUS) == 0) {
			rej->rej_type = SGS_REJ_MISFLAG;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
		if ((ehdr->e_flags & ~at_flags) & EF_SPARC_32PLUS_MASK) {
			rej->rej_type = SGS_REJ_BADFLAG;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
	} else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

/*
 * elf_plt_write() will test to see how far away our destination
 * address lies.  If it is close enough that a branch can
 * be used instead of a jmpl - we will fill the plt in with
 * a single branch.  The branches are much quicker than
 * a jmpl instruction - see bug#4356879 for further
 * details.
 *
 * NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
 *	librtld/dldump update PLTs whose physical
 *	address is not the same as the 'virtual' runtime
 *	address.
 */
Pltbindtype
/* ARGSUSED4 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
	Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	vpltaddr, pltaddr;
	long		disp;

	pltaddr = addr + rel->r_offset;
	vpltaddr = vaddr + rel->r_offset;
	disp = symval - vpltaddr - 4;

	/*
	 * Test if the destination address is close enough to use
	 * a ba,a... instruction to reach it.
	 */
	if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
		uint_t		*pltent, bainstr;
		Pltbindtype	rc;

		pltent = (uint_t *)pltaddr;
		/*
		 * The
		 *
		 *	ba,a,pt %icc, <dest>
		 *
		 * is the most efficient of the PLTs.  If we
		 * are within +-20 bits *and* running on a
		 * v8plus architecture - use that branch.
		 */
		if ((at_flags & EF_SPARC_32PLUS) &&
		    S_INRANGE(disp, 20)) {
			bainstr = M_BA_A_PT;	/* ba,a,pt %icc,<dest> */
			bainstr |= (S_MASK(19) & (disp >> 2));
			rc = PLT_T_21D;
			DBG_CALL(pltcnt21d++);
		} else {
			/*
			 * Otherwise - we fall back to the good old
			 *
			 *	ba,a	<dest>
			 *
			 * which still beats a jmpl instruction.
			 */
			bainstr = M_BA_A;	/* ba,a <dest> */
			bainstr |= (S_MASK(22) & (disp >> 2));
			rc = PLT_T_24D;
			DBG_CALL(pltcnt24d++);
		}

		pltent[2] = M_NOP;		/* nop instr */
		pltent[1] = bainstr;

		iflush_range((char *)(&pltent[1]), 4);
		pltent[0] = M_NOP;		/* nop instr */
		iflush_range((char *)(&pltent[0]), 4);
		return (rc);
	}

	/*
	 * The PLT destination is not in reach of
	 * a branch instruction - so we fall back
	 * to a 'jmpl' sequence.
	 */
	plt_full_range(pltaddr, symval);
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}
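
/*
 * Illustrative note: the displacement computed above is a byte offset from
 * the instruction following the patched PLT word.  A v8 "ba,a" encodes a
 * 22-bit word displacement, giving a reach of roughly +-8MB, which is what
 * the S_INRANGE(disp, 23) test approximates; the v8plus "ba,a,pt %icc" form
 * carries only a 19-bit word displacement, roughly +-1MB, hence the tighter
 * S_INRANGE(disp, 20) test.  For example, a destination about 4MB away can
 * still be reached with ba,a but not with ba,a,pt, so the code falls back
 * to the larger branch form before resorting to a full jmpl sequence.
 */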

/*
 * Local storage space created on the stack for this glue
 * code includes space for:
 *	0x4	pointer to dyn_data
 *	0x4	size prev stack frame
 */
static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x80, 0x90, 0x00, 0x1e,	/* tst	%fp */
/* 0x04 */	0x02, 0x80, 0x00, 0x04,	/* be	0x14 */
/* 0x08 */	0x82, 0x27, 0x80, 0x0e,	/* sub	%sp, %fp, %g1 */
/* 0x0c */	0x10, 0x80, 0x00, 0x03,	/* ba	0x20 */
/* 0x10 */	0x01, 0x00, 0x00, 0x00,	/* nop */
/* 0x14 */	0x82, 0x10, 0x20, 0x60,	/* mov	0x60, %g1 */
/* 0x18 */	0x9d, 0xe3, 0xbf, 0x98,	/* save	%sp, -0x68, %sp */
/* 0x1c */	0xc2, 0x27, 0xbf, 0xf8,	/* st	%g1, [%fp + -0x8] */
/* 0x20 */	0x03, 0x00, 0x00, 0x00,	/* sethi %hi(val), %g1 */
/* 0x24 */	0x82, 0x10, 0x60, 0x00,	/* or	%g1, %lo(val), %g1 */
/* 0x28 */	0x40, 0x00, 0x00, 0x00,	/* call	<rel_addr> */
/* 0x2c */	0xc2, 0x27, 0xbf, 0xfc	/* st	%g1, [%fp + -0x4] */
};

int	dyn_plt_ent_size = sizeof (dyn_plt_template) +
		sizeof (uintptr_t) +	/* reflmp */
		sizeof (uintptr_t) +	/* deflmp */
		sizeof (ulong_t) +	/* symndx */
		sizeof (ulong_t) +	/* sb_flags */
		sizeof (Sym);		/* symdef */

/*
 * the dynamic plt entry is:
 *
 *	tst	%fp
 *	be	1f
 *	nop
 *	sub	%sp, %fp, %g1
 *	ba	2f
 *	nop
 * 1:
 *	mov	SA(MINFRAME), %g1	! if %fp is null this is the
 *					!   'minimum stack'.  %fp is null
 *					!   on the initial stack frame
 * 2:
 *	save	%sp, -(SA(MINFRAME) + 2 * CLONGSIZE), %sp
 *	st	%g1, [%fp + -0x8]	! store prev_stack size in [%fp - 8]
 *	sethi	%hi(dyn_data), %g1
 *	or	%g1, %lo(dyn_data), %g1
 *	call	elf_plt_trace
 *	st	%g1, [%fp + -0x4]	! store dyn_data ptr in [%fp - 4]
 * dyn data:
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	ulong_t		symndx
 *	ulong_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(caddr_t addr, Rela *rptr, Rt_map *rlmp, Rt_map *dlmp,
    Sym *sym, ulong_t symndx, ulong_t pltndx, caddr_t to, ulong_t sb_flags,
    int *fail)
{
	extern ulong_t	elf_plt_trace();
	uintptr_t	dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * If both pltenter & pltexit have been disabled, there is no reason
	 * to even create the glue code.
	 */
	if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
	    (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
		(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
		    rptr, (uintptr_t)to, pltndx);
		return (to);
	}

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size);

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't,
	 * do it now.  Otherwise this function has been called before, but
	 * from a different plt (ie. from another shared object).  In that
	 * case we just set the plt to point to the new dyn_plt.
	 */
	if (*(uint_t *)dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)(dyn_plt + sizeof (dyn_plt_template));

		/*
		 * relocating:
		 *	sethi	%hi(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc(R_SPARC_HI22, (uchar_t *)(dyn_plt + 0x20),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	or	%g1, %lo(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc(R_SPARC_LO10, (uchar_t *)(dyn_plt + 0x24),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	call	elf_plt_trace
		 */
		symvalue = (Xword)((uintptr_t)&elf_plt_trace -
		    (dyn_plt + 0x28));
		if (do_reloc(R_SPARC_WDISP30, (uchar_t *)(dyn_plt + 0x28),
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*(ulong_t *)dyndata++ = symndx;
		*(ulong_t *)dyndata++ = sb_flags;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_name += (Word)STRTAB(dlmp);
		symp->st_value = (Addr)to;

		iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
	}

	(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
	    rptr, (uintptr_t)dyn_plt, 0);
	return ((caddr_t)dyn_plt);
}
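
/*
 * Illustrative note: the sethi/or pair patched above splits a 32-bit address
 * into its upper 22 bits (R_SPARC_HI22) and lower 10 bits (R_SPARC_LO10).
 * For example, for a dyndata address of 0x12345678, the patched
 * sethi %hi(0x12345678), %g1 loads 0x12345400 and the following
 * or %g1, 0x278, %g1 supplies the remaining low bits, reconstructing the
 * full pointer that elf_plt_trace() receives.
 */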

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the address of the PLT entry where the call originated,
 * the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, vaddr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*sym, *nsym;
	Xword		pltndx;
	uint_t		binfo, sb_flags = 0;
	Slookup		sl;
	Pltbindtype	pbtype;
	int		entry, dbg_save, lmflags;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter();

	if ((lmflags = LIST(lmp)->lm_flags) & LML_FLG_RTLDLM) {
		dbg_save = dbg_mask;
		dbg_mask = 0;
	}

	/*
	 * Must calculate true plt relocation address from reloc.
	 * Take offset, subtract number of reserved PLT entries, and divide
	 * by PLT entry size, which should give the index of the plt
	 * entry (and relocation entry since they have been defined to be
	 * in the same order).  Then we must multiply by the size of
	 * a relocation entry, which will give us the offset of the
	 * plt relocation entry from the start of them given by JMPREL(lm).
	 */
	addr = pltoff - M_PLT_RESERVSZ;
	pltndx = addr / M_PLT_ENTSIZE;

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map
	 * or the plt offset is invalid then it's possible someone has walked
	 * over the plt entries or jumped to plt0 out of the blue.
	 */
	if (!lmp || ((addr % M_PLT_ENTSIZE) != 0)) {
		eprintf(ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_SPARC_type_str(R_SPARC_JMP_SLOT),
		    EC_XWORD(lmp), EC_XWORD(pltoff), EC_ADDR(from));
		rtldexit(LIST(lmp), 1);
	}
	reloff = pltndx * sizeof (Rela);
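
	/*
	 * Illustrative note: assuming, for example, the traditional 32-bit
	 * SPARC values of a 12-byte PLT entry (M_PLT_ENTSIZE) and four
	 * reserved entries (M_PLT_RESERVSZ of 48 bytes), a pltoff of 0x6c
	 * yields (0x6c - 48) / 12 = 5, i.e. the sixth non-reserved PLT
	 * entry, and reloff becomes 5 * sizeof (Rela) = 60, the byte offset
	 * of its relocation record within JMPREL(lmp).
	 */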

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + sym->st_name);

	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = LIST(lmp)->lm_tail;

	/*
	 * Find definition for symbol.
	 */
	sl.sl_name = name;
	sl.sl_cmap = lmp;
	sl.sl_imap = LIST(lmp)->lm_head;
	sl.sl_hash = 0;
	sl.sl_rsymndx = rsymndx;
	sl.sl_flags = LKUP_DEFT;

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
		eprintf(ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(LIST(lmp), 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(LIST(lmp), 1);
	}

	if ((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
		ulong_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (FLAGS(lmp) & FLG_RT_FIXED)
		vaddr = 0;
	else
		vaddr = ADDR(lmp);

	pbtype = PLT_T_NONE;
	if (!(rtld_flags & RT_FL_NOBIND)) {
		if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			ulong_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
			    rptr, lmp, nlmp, nsym, symndx, pltndx,
			    (caddr_t)symval, sb_flags, &fail);
			if (fail)
				rtldexit(LIST(lmp), 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			pbtype = elf_plt_write((uintptr_t)vaddr,
			    (uintptr_t)vaddr, rptr, symval, pltndx);
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(NAME(lmp), from, from - ADDR(lmp), pltndx,
	    pbtype, NAME(nlmp), (caddr_t)symval, (caddr_t)nsym->st_value,
	    name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp, lmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map, make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp, 0);

	/*
	 * If the object we've bound to is in the process of being initialized
	 * by another thread, determine whether we should block.
	 */
	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(LIST(lmp));
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_mask = dbg_save;

	return (symval);
}
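
/*
 * Illustrative note: lazy binding on SPARC works roughly as follows.  The
 * first call through an unresolved PLT entry funnels into PLT0, which
 * elf_plt_init() points at the elf_rtbndr() assembly stub; that stub calls
 * elf_bndr() above.  elf_bndr() resolves the symbol, patches the PLT entry
 * via elf_plt_write() (or elf_plt_trace_write() when auditing is active),
 * and returns the resolved address so the stub can complete the original
 * call.  Subsequent calls through the same PLT entry branch straight to the
 * destination.
 */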

/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
	ulong_t		roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
	ulong_t		emap, dsymndx, pltndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		textrel = 0, ret = 1, noplt = 0;
	long		relacount = RELACOUNT(lmp);
	Rela		*rel;
	Pltbindtype	pbtype;
	uint_t		binfo, pbinfo;
	Alist		*bound = 0;

	/*
	 * If an object has any DT_REGISTER entries associated with
	 * it, they are processed now.
	 */
	if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
		if (elf_regsyms(lmp) == 0)
			return (0);
	}

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * procedure linkage table entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		if ((ulong_t)PLTGOT(lmp) < etext) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0)
				return (0);
			textrel = 1;
		}
		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 */
	if (plt) {
		relbgn = pltbgn;
		relend = pltend;
	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL
		 *	and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations, the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(NAME(lmp), 0, plt, DBG_REL_NONE));
		return (1);
	}

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);
	emap = ADDR(lmp) + MSIZE(lmp);

	DBG_CALL(Dbg_reloc_run(NAME(lmp), M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing in lazy mode there is no need to scan the
	 * .rela.plt table.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
		noplt = 1;
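
	/*
	 * Illustrative note: the relbgn/relend min/max logic above makes the
	 * loop below independent of which ABI interpretation the link-editor
	 * followed.  For example, if .rela.dyn occupies [0x100, 0x400) and
	 * .rela.plt occupies [0x400, 0x520), then whether or not RELSZ
	 * already covered the .plt relocations, relbgn/relend end up spanning
	 * [0x100, 0x520) and every relocation record is visited exactly once.
	 */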

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		Addr		vaddr;
		uint_t		sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).  Only make the
		 * jump if we've been given a hint on the number of
		 * relocations.
		 */
		if ((rtype == R_SPARC_RELATIVE) &&
		    !(FLAGS(lmp) & FLG_RT_FIXED) && !dbg_mask) {
			/*
			 * It's possible that the relative relocation block
			 * has relocations against the text segment as well
			 * as the data segment.  Since our optimized relocation
			 * engine does not check which segment the relocation
			 * is against - just mprotect it now if it's been
			 * marked as containing TEXTRELs.
			 */
			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
					ret = 0;
					break;
				}
				textrel = 1;
			}
			if (relacount) {
				relbgn = elf_reloc_relacount(relbgn, relacount,
				    relsiz, basebgn);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, etext, emap);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);

		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_SPARC_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		if (rtype != R_SPARC_REGISTER) {
			/*
			 * If this is a shared object, add the base address
			 * to offset.
			 */
			if (!(FLAGS(lmp) & FLG_RT_FIXED))
				roffset += basebgn;

			/*
			 * If this relocation is not against part of the image
			 * mapped into memory we skip it.
			 */
			if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
			    MSIZE(lmp)))) {
				elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
				    rsymndx);
				continue;
			}
		}

		/*
		 * If we're promoting plts determine if this one has already
		 * been written.  An uninitialized plt's second instruction is
		 * a branch.
		 */
		if (plt) {
			ulong_t	*_roffset = (ulong_t *)roffset;

			_roffset++;
			if ((*_roffset & (~(S_MASK(22)))) != M_BA_A)
				continue;
		}

		binfo = 0;
		pltndx = (ulong_t)-1;
		pbtype = PLT_T_NONE;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base
			 * address.  (we should have no local relocations in
			 * the executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * TLS relocation - value for DTPMOD relocation
				 * is the TLS modid.
				 */
				if (rtype == M_R_DTPMOD)
					value = TLSMODID(lmp);
			} else {
				/*
				 * If the symbol index is equal to the previous
				 * symbol index relocation we processed then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol, our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_SPARC_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(
						    NAME(lmp), (caddr_t)roffset,
						    (caddr_t)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					uchar_t		bind;

					/*
					 * Lookup the symbol definition.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					sl.sl_name = name;
					sl.sl_cmap = lmp;
					sl.sl_imap = 0;
					sl.sl_hash = 0;
					sl.sl_rsymndx = rsymndx;

					if (rtype == R_SPARC_COPY)
						sl.sl_flags = LKUP_COPY;
					else
						sl.sl_flags = LKUP_DEFT;

					sl.sl_flags |= LKUP_ALLCNTLIST;

					if (rtype != R_SPARC_JMP_SLOT)
						sl.sl_flags |= LKUP_SPEC;

					bind = ELF_ST_BIND(symref->st_info);
					if (bind == STB_WEAK)
						sl.sl_flags |= LKUP_WEAK;

					symdef = lookup_sym(&sl, &_lmp, &binfo);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					if (symdef == 0) {
					    if (bind != STB_WEAK) {
						if (LIST(lmp)->lm_flags &
						    LML_FLG_IGNRELERR) {
							continue;
						} else if (LIST(lmp)->lm_flags &
						    LML_FLG_TRC_WARN) {
							(void) printf(MSG_INTL(
							    MSG_LDD_SYM_NFOUND),
							    demangle(name),
							    NAME(lmp));
							continue;
						} else {
							eprintf(ERR_FATAL,
							    MSG_INTL(MSG_REL_NOSYM),
							    NAME(lmp),
							    demangle(name));
							ret = 0;
							break;
						}
					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(
						    NAME(lmp), (caddr_t)roffset,
						    (caddr_t)
						    (roffset - basebgn), name));
						continue;
					    }
					}

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (alist_test(&bound, _lmp,
						    sizeof (Rt_map *),
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					value = symdef->st_value;
					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_SPARC_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * TLS relocation - value for DTPMOD relocation
				 * is the TLS modid.
				 */
				if (rtype == M_R_DTPMOD)
					value = TLSMODID(_lmp);
				else if (rtype == M_R_TPOFF)
					value = -(TLSSTATOFF(_lmp) - value);
			}
		} else {
			/*
			 * Special cases, a register symbol associated with
			 * symbol index 0 is initialized (i.e. relocated) to
			 * a constant in the r_addend field rather than to a
			 * symbol value.
			 *
			 * A DTPMOD relocation is a local binding to a TLS
			 * symbol.  Fill in the TLSMODID for the current
			 * object.
			 */
			if (rtype == R_SPARC_REGISTER)
				value = 0;
			else if (rtype == M_R_DTPMOD)
				value = TLSMODID(lmp);
			else
				value = basebgn;
			name = (char *)0;
		}
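
		/*
		 * Illustrative note: at this point value holds the resolved
		 * symbol address S (plus the load base for non-fixed
		 * objects), already adjusted by -P (the relocated offset)
		 * for PC-relative types.  Once the addend A is folded in by
		 * the default case below, do_reloc() effectively applies the
		 * usual ELF formulas: S + A for absolute relocations and
		 * S + A - P for PC-relative ones.  DTPMOD records instead
		 * carry the TLS module id, and TPOFF records the (negative)
		 * thread-pointer-relative offset into the static TLS block.
		 */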

		/*
		 * If this object has relocations in the text segment, turn
		 * off the write protect.
		 */
		if ((rtype != R_SPARC_REGISTER) && (roffset < etext) &&
		    (textrel == 0)) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
				ret = 0;
				break;
			}
			textrel = 1;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		DBG_CALL(Dbg_reloc_in(M_MACH, M_REL_SHT_TYPE, rel, name, NULL));

		switch (rtype) {
		case R_SPARC_REGISTER:
			/*
			 * The v9 ABI 4.2.4 says that system objects may,
			 * but are not required to, use register symbols
			 * to indicate how they use global registers.  Thus
			 * at least %g6, %g7 must be allowed in addition
			 * to %g2 and %g3.
			 */
			value += reladd;
			if (roffset == STO_SPARC_REGISTER_G1) {
				set_sparc_g1(value);
			} else if (roffset == STO_SPARC_REGISTER_G2) {
				set_sparc_g2(value);
			} else if (roffset == STO_SPARC_REGISTER_G3) {
				set_sparc_g3(value);
			} else if (roffset == STO_SPARC_REGISTER_G4) {
				set_sparc_g4(value);
			} else if (roffset == STO_SPARC_REGISTER_G5) {
				set_sparc_g5(value);
			} else if (roffset == STO_SPARC_REGISTER_G6) {
				set_sparc_g6(value);
			} else if (roffset == STO_SPARC_REGISTER_G7) {
				set_sparc_g7(value);
			} else {
				eprintf(ERR_FATAL, MSG_INTL(MSG_REL_BADREG),
				    NAME(lmp), EC_ADDR(roffset));
				ret = 0;
				break;
			}

			DBG_CALL(Dbg_reloc_reg_apply((Xword)roffset,
			    (Xword)value));
			break;
		case R_SPARC_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_SPARC_JMP_SLOT:
			pltndx = ((ulong_t)rel -
			    (uintptr_t)JMPREL(lmp)) / relsiz;

			if (FLAGS(lmp) & FLG_RT_FIXED)
				vaddr = 0;
			else
				vaddr = ADDR(lmp);

			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				ulong_t	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) /
				    SYMENT(_lmp));

				(void) elf_plt_trace_write((caddr_t)vaddr,
				    (Rela *)rel, lmp, _lmp, symdef, symndx,
				    pltndx, (caddr_t)value, sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply((Xword)roffset,
				    (Xword)value));
				pbtype = elf_plt_write((uintptr_t)vaddr,
				    (uintptr_t)vaddr, (void *)rel, value,
				    pltndx);
			}
			break;
		default:
			value += reladd;

			/*
			 * Write the relocation out.  If this relocation is a
			 * common basic write, skip the doreloc() engine.
			 */
			if ((rtype == R_SPARC_GLOB_DAT) ||
			    (rtype == R_SPARC_32)) {
				if (roffset & 0x3) {
					eprintf(ERR_FATAL,
					    MSG_INTL(MSG_REL_NONALIGN),
					    conv_reloc_SPARC_type_str(rtype),
					    NAME(lmp), demangle(name),
					    EC_OFF(roffset));
					ret = 0;
				} else
					*(uint_t *)roffset += value;
			} else {
				if (do_reloc(rtype, (uchar_t *)roffset,
				    (Xword *)&value, name, NAME(lmp)) == 0)
					ret = 0;
			}

			/*
			 * The value now contains the 'bit-shifted' value that
			 * was or'ed into memory (this was set by do_reloc()).
			 */
			DBG_CALL(Dbg_reloc_apply((Xword)roffset,
			    (Xword)value));

			/*
			 * If this relocation is against a text segment, make
			 * sure that the instruction cache is flushed.
			 */
			if (textrel)
				iflush_range((caddr_t)roffset, 0x4);
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(NAME(lmp), (caddr_t)roffset,
			    (caddr_t)(roffset - basebgn), pltndx, pbtype,
			    NAME(_lmp), (caddr_t)value,
			    (caddr_t)symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type_str(uint_t rel)
{
	return (conv_reloc_SPARC_type_str(rel));
}
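
/*
 * Illustrative sketch (kept under #if 0; not part of the runtime linker
 * proper): a condensed restatement of the ordering elf_plt_write() above
 * uses when it converts a PLT entry into a branch.  The branch in the
 * second word is written and flushed before the first word is touched, so
 * a thread racing through the same PLT entry still executes the original
 * first instruction (which only loads %g1) followed by either the old
 * sequence or the newly written branch, never a half-patched mixture.
 * The function name below is hypothetical.
 */
#if 0
static void
example_plt_branch_patch(uint_t *pltent, uint_t bainstr)
{
	pltent[2] = M_NOP;			/* new third word */
	pltent[1] = bainstr;			/* ba,a to the destination */
	iflush_range((char *)(&pltent[1]), 4);	/* make the branch visible */
	pltent[0] = M_NOP;			/* finally retire word 0 */
	iflush_range((char *)(&pltent[0]), 4);
}
#endif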