/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 */

/*
 * SPARC machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */

#include <stdio.h>
#include <sys/elf.h>
#include <sys/elf_SPARC.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <synch.h>
#include <string.h>
#include <debug.h>
#include <reloc.h>
#include <conv.h>
#include "_rtld.h"
#include "_audit.h"
#include "_elf.h"
#include "_inline.h"
#include "msg.h"

/* Assembler helpers: I-cache flush of a range, and full jmpl-style PLT fill. */
extern void iflush_range(caddr_t, size_t);
extern void plt_full_range(uintptr_t, uintptr_t);

/*
 * Validate that an object's ELF header is usable by this (SPARC) linker:
 * either plain EM_SPARC, or EM_SPARC32PLUS with flags compatible with the
 * capabilities recorded in at_flags (AT_SUN_HWCAP-derived; defined elsewhere).
 * On rejection, fills in *rej with the reason and returns 0; returns 1 when
 * the header is acceptable.
 */
int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
    /*
     * Check machine type and flags.
     */
    if (ehdr->e_machine != EM_SPARC) {
        if (ehdr->e_machine != EM_SPARC32PLUS) {
            /* Neither sparc nor sparc32plus: wrong machine entirely. */
            rej->rej_type = SGS_REJ_MACH;
            rej->rej_info = (uint_t)ehdr->e_machine;
            return (0);
        }
        if ((ehdr->e_flags & EF_SPARC_32PLUS) == 0) {
            /* Claims SPARC32PLUS machine but lacks the matching flag. */
            rej->rej_type = SGS_REJ_MISFLAG;
            rej->rej_info = (uint_t)ehdr->e_flags;
            return (0);
        }
        if ((ehdr->e_flags & ~at_flags) & EF_SPARC_32PLUS_MASK) {
            /* Requires v8plus capabilities the running system lacks. */
            rej->rej_type = SGS_REJ_BADFLAG;
            rej->rej_info = (uint_t)ehdr->e_flags;
            return (0);
        }
    } else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
        /* Plain SPARC objects may only carry memory-model flag bits. */
        rej->rej_type = SGS_REJ_BADFLAG;
        rej->rej_info = (uint_t)ehdr->e_flags;
        return (0);
    }
    return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
    /*
     * There is no need to analyze ld.so because we don't map in any of
     * its dependencies.  However we may map these dependencies in later
     * (as if ld.so had dlopened them), so initialize the plt and the
     * permission information.
     */
    if (PLTGOT(lmp))
        elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

/*
 * elf_plt_write() will test to see how far away our destination
 * address lies.  If it is close enough that a branch can
 * be used instead of a jmpl - we will fill the plt in with
 * single branch.  The branches are much quicker than
 * a jmpl instruction - see bug#4356879 for further
 * details.
 *
 * NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
 *	librtld/dldump update PLT's whose physical
 *	address is not the same as the 'virtual' runtime
 *	address.
 */
Pltbindtype
/* ARGSUSED4 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
    Rela	*rel = (Rela *)rptr;
    uintptr_t	vpltaddr, pltaddr;
    long	disp;

    pltaddr = addr + rel->r_offset;
    vpltaddr = vaddr + rel->r_offset;
    /* Branch displacement is relative to the instruction after the plt[1] slot. */
    disp = symval - vpltaddr - 4;

    /*
     * Test if the destination address is close enough to use
     * a ba,a... instruction to reach it.
     */
    if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
        uint_t	*pltent, bainstr;
        Pltbindtype	rc;

        pltent = (uint_t *)pltaddr;

        /*
         * The
         *
         *	ba,a,pt %icc, <dest>
         *
         * is the most efficient of the PLT's.  If we
         * are within +-20 bits *and* running on a
         * v8plus architecture - use that branch.
         */
        if ((at_flags & EF_SPARC_32PLUS) &&
            S_INRANGE(disp, 20)) {
            bainstr = M_BA_A_PT;	/* ba,a,pt %icc,<dest> */
            bainstr |= (S_MASK(19) & (disp >> 2));
            rc = PLT_T_21D;
            DBG_CALL(pltcnt21d++);
        } else {
            /*
             * Otherwise - we fall back to the good old
             *
             *	ba,a	<dest>
             *
             * Which still beats a jmpl instruction.
             */
            bainstr = M_BA_A;	/* ba,a <dest> */
            bainstr |= (S_MASK(22) & (disp >> 2));
            rc = PLT_T_24D;
            DBG_CALL(pltcnt24d++);
        }

        /*
         * NOTE(review): the store/flush ordering below (word 2, then
         * word 1 + flush, then word 0 + flush) appears deliberate —
         * presumably so a thread concurrently executing this PLT entry
         * never observes a half-updated instruction sequence.  Do not
         * reorder; confirm against the SPARC PLT update protocol.
         */
        pltent[2] = M_NOP;		/* nop instr */
        pltent[1] = bainstr;

        iflush_range((char *)(&pltent[1]), 4);
        pltent[0] = M_NOP;		/* nop instr */
        iflush_range((char *)(&pltent[0]), 4);
        return (rc);
    }

    /*
     * The PLT destination is not in reach of
     * a branch instruction - so we fall back
     * to a 'jmpl' sequence.
     */
    plt_full_range(pltaddr, symval);
    DBG_CALL(pltcntfull++);
    return (PLT_T_FULL);
}

/*
 * Local storage space created on the stack created for this glue
 * code includes space for:
 *	0x4	pointer to dyn_data
 *	0x4	size prev stack frame
 */
static const uchar_t dyn_plt_template[] = {
    /* 0x00 */	0x80, 0x90, 0x00, 0x1e,	/* tst   %fp */
    /* 0x04 */	0x02, 0x80, 0x00, 0x04,	/* be    0x14 */
    /* 0x08 */	0x82, 0x27, 0x80, 0x0e,	/* sub   %sp, %fp, %g1 */
    /* 0x0c */	0x10, 0x80, 0x00, 0x03,	/* ba    0x20 */
    /* 0x10 */	0x01, 0x00, 0x00, 0x00,	/* nop */
    /* 0x14 */	0x82, 0x10, 0x20, 0x60,	/* mov   0x60, %g1 */
    /* 0x18 */	0x9d, 0xe3, 0xbf, 0x98,	/* save  %sp, -0x68, %sp */
    /* 0x1c */	0xc2, 0x27, 0xbf, 0xf8,	/* st    %g1, [%fp + -0x8] */
    /* 0x20 */	0x03, 0x00, 0x00, 0x00,	/* sethi %hi(val), %g1 */
    /* 0x24 */	0x82, 0x10, 0x60, 0x00,	/* or    %g1, %lo(val), %g1 */
    /* 0x28 */	0x40, 0x00, 0x00, 0x00,	/* call  <rel_addr> */
    /* 0x2c */	0xc2, 0x27, 0xbf, 0xfc	/* st    %g1, [%fp + -0x4] */
};

/*
 * Size of one per-plt audit glue entry: the code template above followed
 * immediately by its dyn_data payload (see layout comment below).
 */
int dyn_plt_ent_size = sizeof (dyn_plt_template) +
	sizeof (uintptr_t) +	/* reflmp */
	sizeof (uintptr_t) +	/* deflmp */
	sizeof (ulong_t) +	/* symndx */
	sizeof (ulong_t) +	/* sb_flags */
	sizeof (Sym);		/* symdef */

/*
 * the dynamic plt entry is:
 *
 *	tst	%fp
 *	be	1f
 *	nop
 *	sub	%sp, %fp, %g1
 *	ba	2f
 *	nop
 * 1:
 *	mov	SA(MINFRAME), %g1	! if %fp is null this is the
 *					! 'minimum stack'.  %fp is null
 *					! on the initial stack frame
 * 2:
 *	save	%sp, -(SA(MINFRAME) + 2 * CLONGSIZE), %sp
 *	st	%g1, [%fp + -0x8]	! store prev_stack size in [%fp - 8]
 *	sethi	%hi(dyn_data), %g1
 *	or	%g1, %lo(dyn_data), %g1
 *	call	elf_plt_trace
 *	st	%g1, [%fp + -0x4]	!
 * store dyn_data ptr in [%fp - 4]
 * dyn data:
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	ulong_t		symndx
 *	ulong_t		sb_flags
 *	Sym		symdef
 */

/*
 * Build (or reuse) the audit glue code for one plt entry, and point the plt
 * at it so that calls pass through elf_plt_trace() for la_pltenter/la_pltexit
 * processing.
 *
 *	addr		load address of the object owning the plt
 *	rptr		the plt's R_SPARC_JMP_SLOT relocation entry
 *	rlmp/dlmp	referencing and defining link-maps
 *	sym		the bound symbol (copied into the glue's dyn_data)
 *	symndx/pltndx	symbol and plt indices
 *	to		the real destination address of the binding
 *	sb_flags	audit symbol-binding flags
 *	fail		out: set to 1 on relocation failure of the glue
 *
 * Returns the address execution should be directed to: the glue entry, or
 * 'to' directly when auditing is disabled for this binding.
 */
static caddr_t
elf_plt_trace_write(caddr_t addr, Rela *rptr, Rt_map *rlmp, Rt_map *dlmp,
    Sym *sym, ulong_t symndx, ulong_t pltndx, caddr_t to, ulong_t sb_flags,
    int *fail)
{
    extern ulong_t	elf_plt_trace();
    uchar_t	*dyn_plt;
    uintptr_t	*dyndata;

    /*
     * If both pltenter & pltexit have been disabled there
     * is no reason to even create the glue code.  Bind the plt
     * straight to the destination instead.
     */
    if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
        (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
        (void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
            rptr, (uintptr_t)to, pltndx);
        return (to);
    }

    /*
     * We only need to add the glue code if there is an auditing
     * library that is interested in this binding.  Locate this plt's
     * slot in the preallocated per-object glue area.
     */
    dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
        (pltndx * dyn_plt_ent_size));

    /*
     * Have we initialized this dynamic plt entry yet?  If we haven't do it
     * now.  Otherwise this function has been called before, but from a
     * different plt (ie. from another shared object).  In that case
     * we just set the plt to point to the new dyn_plt.
     */
    if (*dyn_plt == 0) {
        Sym	*symp;
        Xword	symvalue;
        Lm_list	*lml = LIST(rlmp);

        (void) memcpy((void *)dyn_plt, dyn_plt_template,
            sizeof (dyn_plt_template));
        /* dyn_data payload lives immediately after the code template. */
        dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
            sizeof (dyn_plt_template));

        /*
         * relocating:
         *	sethi	%hi(dyndata), %g1
         * (template offset 0x20)
         */
        symvalue = (Xword)dyndata;
        if (do_reloc_rtld(R_SPARC_HI22, (dyn_plt + 0x20),
            &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
            MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
            *fail = 1;
            return (0);
        }

        /*
         * relocating:
         *	or	%g1, %lo(dyndata), %g1
         * (template offset 0x24)
         */
        symvalue = (Xword)dyndata;
        if (do_reloc_rtld(R_SPARC_LO10, (dyn_plt + 0x24),
            &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
            MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
            *fail = 1;
            return (0);
        }

        /*
         * relocating:
         *	call	elf_plt_trace
         * (pc-relative call at template offset 0x28)
         */
        symvalue = (Xword)((uintptr_t)&elf_plt_trace -
            (uintptr_t)(dyn_plt + 0x28));
        if (do_reloc_rtld(R_SPARC_WDISP30, (dyn_plt + 0x28),
            &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
            MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
            *fail = 1;
            return (0);
        }

        /*
         * Fill in the dyn_data payload in the layout documented above;
         * elf_plt_trace() reads these fields at run time.
         */
        *dyndata++ = (uintptr_t)rlmp;
        *dyndata++ = (uintptr_t)dlmp;
        *(ulong_t *)dyndata++ = symndx;
        *(ulong_t *)dyndata++ = sb_flags;
        symp = (Sym *)dyndata;
        *symp = *sym;
        /* Convert st_name to an absolute string pointer for the tracer. */
        symp->st_name += (Word)STRTAB(dlmp);
        symp->st_value = (Addr)to;

        /* New code was written: keep the I-cache coherent. */
        iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
    }

    /* Point the real plt entry at the glue code. */
    (void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr, rptr,
        (uintptr_t)dyn_plt, 0);
    return ((caddr_t)dyn_plt);
}

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the address of the PLT entry where the call originated,
 * the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
{
    Rt_map	*nlmp, *llmp;
    ulong_t	addr, vaddr, reloff, symval, rsymndx;
    char	*name;
    Rela	*rptr;
    Sym		*rsym, *nsym;
    Xword	pltndx;
    uint_t	binfo, sb_flags = 0, dbg_class;
    Slookup	sl;
    Sresult	sr;
    Pltbindtype	pbtype;
    int		entry, lmflags;
    Lm_list	*lml;

    /*
     * For compatibility with libthread (TI_VERSION 1) we track the entry
     * value.  A zero value indicates we have recursed into ld.so.1 to
     * further process a locking request.  Under this recursion we disable
     * tsort and cleanup activities.
     */
    entry = enter(0);

    lml = LIST(lmp);
    if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
        /* Suppress diagnostics while binding within ld.so.1 itself. */
        dbg_class = dbg_desc->d_class;
        dbg_desc->d_class = 0;
    }

    /*
     * Must calculate true plt relocation address from reloc.
     * Take offset, subtract number of reserved PLT entries, and divide
     * by PLT entry size, which should give the index of the plt
     * entry (and relocation entry since they have been defined to be
     * in the same order).  Then we must multiply by the size of
     * a relocation entry, which will give us the offset of the
     * plt relocation entry from the start of them given by JMPREL(lm).
     */
    addr = pltoff - M_PLT_RESERVSZ;
    pltndx = addr / M_PLT_ENTSIZE;

    /*
     * Perform some basic sanity checks.  If we didn't get a load map
     * or the plt offset is invalid then its possible someone has walked
     * over the plt entries or jumped to plt[0] out of the blue.
     */
    if (!lmp || ((addr % M_PLT_ENTSIZE) != 0)) {
        Conv_inv_buf_t	inv_buf;

        eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
            conv_reloc_SPARC_type(R_SPARC_JMP_SLOT, 0, &inv_buf),
            EC_NATPTR(lmp), EC_XWORD(pltoff), EC_NATPTR(from));
        rtldexit(lml, 1);
    }
    reloff = pltndx * sizeof (Rela);

    /*
     * Use relocation entry to get symbol table entry and symbol name.
     */
    addr = (ulong_t)JMPREL(lmp);
    rptr = (Rela *)(addr + reloff);
    rsymndx = ELF_R_SYM(rptr->r_info);
    rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
    name = (char *)(STRTAB(lmp) + rsym->st_name);

    /*
     * Determine the last link-map of this list, this'll be the starting
     * point for any tsort() processing.
     */
    llmp = lml->lm_tail;

    /*
     * Find definition for symbol.  Initialize the symbol lookup, and
     * symbol result, data structures.
     */
    SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
        rsymndx, rsym, 0, LKUP_DEFT);
    SRESULT_INIT(sr, name);

    if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
        eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
            demangle(name));
        rtldexit(lml, 1);
    }

    name = (char *)sr.sr_name;
    nlmp = sr.sr_dmap;
    nsym = sr.sr_sym;

    symval = nsym->st_value;

    /* Relocate the value unless the defining object is fixed or SHN_ABS. */
    if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
        (nsym->st_shndx != SHN_ABS))
        symval += ADDR(nlmp);
    if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
        /*
         * Record that this new link map is now bound to the caller.
         */
        if (bind_one(lmp, nlmp, BND_REFER) == 0)
            rtldexit(lml, 1);
    }

    /* Give any la_symbind() auditors a chance to redirect the binding. */
    if ((lml->lm_tflags | AFLAGS(lmp)) & LML_TFLG_AUD_SYMBIND) {
        ulong_t	symndx = (((uintptr_t)nsym -
            (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

        symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
            &sb_flags);
    }

    if (FLAGS(lmp) & FLG_RT_FIXED)
        vaddr = 0;
    else
        vaddr = ADDR(lmp);

    pbtype = PLT_T_NONE;
    if (!(rtld_flags & RT_FL_NOBIND)) {
        if (((lml->lm_tflags | AFLAGS(lmp)) &
            (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
            AUDINFO(lmp)->ai_dynplts) {
            /*
             * Route the plt through audit glue code so that
             * la_pltenter()/la_pltexit() fire on each call.
             */
            int	fail = 0;
            ulong_t	symndx = (((uintptr_t)nsym -
                (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

            symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
                rptr, lmp, nlmp, nsym, symndx, pltndx,
                (caddr_t)symval, sb_flags, &fail);
            if (fail)
                rtldexit(lml, 1);
        } else {
            /*
             * Write standard PLT entry to jump directly
             * to newly bound function.
             */
            pbtype = elf_plt_write((uintptr_t)vaddr,
                (uintptr_t)vaddr, rptr, symval, pltndx);
        }
    }

    /*
     * Print binding information and rebuild PLT entry.
     */
    DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
        pltndx, pbtype, nlmp, (Addr)symval, nsym->st_value, name, binfo));

    /*
     * Complete any processing for newly loaded objects.  Note we don't
     * know exactly where any new objects are loaded (we know the object
     * that supplied the symbol, but others may have been loaded lazily as
     * we searched for the symbol), so sorting starts from the last
     * link-map known on entry to this routine.
     */
    if (entry)
        load_completion(llmp);

    /*
     * Some operations like dldump() or dlopen()'ing a relocatable object
     * result in objects being loaded on rtld's link-map, make sure these
     * objects are initialized also.
     */
    if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
        load_completion(nlmp);

    /*
     * Make sure the object to which we've bound has had its .init fired.
     * Cleanup before return to user code.
     */
    if (entry) {
        is_dep_init(nlmp, lmp);
        leave(lml, 0);
    }

    if (lmflags & LML_FLG_RTLDLM)
        dbg_desc->d_class = dbg_class;

    return (symval);
}

/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
    ulong_t	relbgn, relend, relsiz, basebgn, pltbgn, pltend;
    ulong_t	dsymndx, pltndx, roffset, rsymndx, psymndx = 0;
    uchar_t	rtype;
    long	reladd, value, pvalue, relacount = RELACOUNT(lmp);
    Sym		*symref, *psymref, *symdef, *psymdef;
    char	*name, *pname;
    Rt_map	*_lmp, *plmp;
    int		ret = 1, noplt = 0;
    Rela	*rel;
    Pltbindtype	pbtype;
    uint_t	binfo, pbinfo;
    APlist	*bound = NULL;

    /*
     * If an object has any DT_REGISTER entries associated with
     * it, they are processed now.
     */
    if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
        if (elf_regsyms(lmp) == 0)
            return (0);
    }

    /*
     * Although only necessary for lazy binding, initialize the first
     * procedure linkage table entry to go to elf_rtbndr().  dbx(1) seems
     * to find this useful.
     */
    if ((plt == 0) && PLTGOT(lmp)) {
        mmapobj_result_t	*mpp;

        /*
         * Make sure the segment is writable.
         */
        if ((((mpp =
            find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
            ((mpp->mr_prot & PROT_WRITE) == 0)) &&
            ((set_prot(lmp, mpp, 1) == 0) ||
            (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
            return (0);

        elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
    }

    /*
     * Initialize the plt start and end addresses.
     */
    if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
        pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

    /*
     * If we've been called upon to promote an RTLD_LAZY object to an
     * RTLD_NOW then we're only interested in scanning the .plt table.
     */
    if (plt) {
        relbgn = pltbgn;
        relend = pltend;
    } else {
        /*
         * The relocation sections appear to the run-time linker as a
         * single table.  Determine the address of the beginning and end
         * of this table.  There are two different interpretations of
         * the ABI at this point:
         *
         *  -	The REL table and its associated RELSZ indicate the
         *	concatenation of *all* relocation sections (this is the
         *	model our link-editor constructs).
         *
         *  -	The REL table and its associated RELSZ indicate the
         *	concatenation of all *but* the .plt relocations.  These
         *	relocations are specified individually by the JMPREL and
         *	PLTRELSZ entries.
         *
         * Determine from our knowledge of the relocation range and
         * .plt range, the range of the total relocation table.  Note
         * that one other ABI assumption seems to be that the .plt
         * relocations always follow any other relocations, the
         * following range checking drops that assumption.
         */
        relbgn = (ulong_t)(REL(lmp));
        relend = relbgn + (ulong_t)(RELSZ(lmp));
        if (pltbgn) {
            if (!relbgn || (relbgn > pltbgn))
                relbgn = pltbgn;
            if (!relbgn || (relend < pltend))
                relend = pltend;
        }
    }
    if (!relbgn || (relbgn == relend)) {
        DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
        return (1);
    }

    relsiz = (ulong_t)(RELENT(lmp));
    basebgn = ADDR(lmp);

    DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

    /*
     * If we're processing in lazy mode there is no need to scan the
     * .rela.plt table.
     */
    if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
        noplt = 1;

    /*
     * Loop through relocations.
     */
    while (relbgn < relend) {
        mmapobj_result_t	*mpp;
        uint_t	sb_flags = 0;
        Addr	vaddr;

        rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);

        /*
         * If this is a RELATIVE relocation in a shared object (the
         * common case), and if we are not debugging, then jump into one
         * of the tighter relocation loops.
         */
        if ((rtype == R_SPARC_RELATIVE) &&
            ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
            if (relacount) {
                /* DT_RELACOUNT told us exactly how many. */
                relbgn = elf_reloc_relative_count(relbgn,
                    relacount, relsiz, basebgn, lmp, textrel);
                relacount = 0;
            } else {
                relbgn = elf_reloc_relative(relbgn, relend,
                    relsiz, basebgn, lmp, textrel);
            }
            if (relbgn >= relend)
                break;
            rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
        }

        roffset = ((Rela *)relbgn)->r_offset;

        reladd = (long)(((Rela *)relbgn)->r_addend);
        rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);

        rel = (Rela *)relbgn;
        relbgn += relsiz;

        /*
         * Optimizations.
         */
        if (rtype == R_SPARC_NONE)
            continue;
        if (noplt && ((ulong_t)rel >= pltbgn) &&
            ((ulong_t)rel < pltend)) {
            /* Lazy mode: skip the whole .rela.plt range at once. */
            relbgn = pltend;
            continue;
        }

        if (rtype != R_SPARC_REGISTER) {
            /*
             * If this is a shared object, add the base address
             * to offset.
             */
            if (!(FLAGS(lmp) & FLG_RT_FIXED))
                roffset += basebgn;

            /*
             * If this relocation is not against part of the image
             * mapped into memory we skip it.
             */
            if ((mpp = find_segment((caddr_t)roffset,
                lmp)) == NULL) {
                elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
                    rsymndx);
                continue;
            }
        }

        /*
         * If we're promoting .plts, try and determine if this one has
         * already been written.  An uninitialized .plts' second
         * instruction is a branch.  Note, elf_plt_write() optimizes
         * .plt relocations, and it's possible that a relocated entry
         * is a branch.  If this is the case, we can't tell the
         * difference between an uninitialized .plt and a relocated,
         * .plt that uses a branch.  In this case, we'll simply redo
         * the relocation calculation, which is a bit sad.
         */
        if (plt) {
            ulong_t	*_roffset = (ulong_t *)roffset;

            _roffset++;
            if ((*_roffset & (~(S_MASK(22)))) != M_BA_A)
                continue;
        }

        binfo = 0;
        pltndx = (ulong_t)-1;
        pbtype = PLT_T_NONE;

        /*
         * If a symbol index is specified then get the symbol table
         * entry, locate the symbol definition, and determine its
         * address.
         */
        if (rsymndx) {
            /*
             * Get the local symbol table entry.
             */
            symref = (Sym *)((ulong_t)SYMTAB(lmp) +
                (rsymndx * SYMENT(lmp)));

            /*
             * If this is a local symbol, just use the base address.
             * (we should have no local relocations in the
             * executable).
             */
            if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
                value = basebgn;
                name = NULL;

                /*
                 * Special case TLS relocations.
                 */
                if (rtype == R_SPARC_TLS_DTPMOD32) {
                    /*
                     * Use the TLS modid.
                     */
                    value = TLSMODID(lmp);

                } else if (rtype == R_SPARC_TLS_TPOFF32) {
                    if ((value = elf_static_tls(lmp, symref,
                        rel, rtype, 0, roffset, 0)) == 0) {
                        ret = 0;
                        break;
                    }
                }
            } else {
                /*
                 * If the symbol index is equal to the previous
                 * symbol index relocation we processed then
                 * reuse the previous values.  (Note that there
                 * have been cases where a relocation exists
                 * against a copy relocation symbol, our ld(1)
                 * should optimize this away, but make sure we
                 * don't use the same symbol information should
                 * this case exist).
                 */
                if ((rsymndx == psymndx) &&
                    (rtype != R_SPARC_COPY)) {
                    /* LINTED */
                    if (psymdef == 0) {
                        DBG_CALL(Dbg_bind_weak(lmp,
                            (Addr)roffset, (Addr)
                            (roffset - basebgn), name));
                        continue;
                    }
                    /* LINTED */
                    value = pvalue;
                    /* LINTED */
                    name = pname;
                    symdef = psymdef;
                    /* LINTED */
                    symref = psymref;
                    /* LINTED */
                    _lmp = plmp;
                    /* LINTED */
                    binfo = pbinfo;

                    if ((LIST(_lmp)->lm_tflags |
                        AFLAGS(_lmp)) &
                        LML_TFLG_AUD_SYMBIND) {
                        value = audit_symbind(lmp, _lmp,
                            /* LINTED */
                            symdef, dsymndx, value,
                            &sb_flags);
                    }
                } else {
                    Slookup	sl;
                    Sresult	sr;

                    /*
                     * Lookup the symbol definition.
                     * Initialize the symbol lookup, and
                     * symbol result, data structures.
                     */
                    name = (char *)(STRTAB(lmp) +
                        symref->st_name);

                    SLOOKUP_INIT(sl, name, lmp, 0,
                        ld_entry_cnt, 0, rsymndx, symref,
                        rtype, LKUP_STDRELOC);
                    SRESULT_INIT(sr, name);
                    symdef = NULL;

                    if (lookup_sym(&sl, &sr, &binfo,
                        in_nfavl)) {
                        name = (char *)sr.sr_name;
                        _lmp = sr.sr_dmap;
                        symdef = sr.sr_sym;
                    }

                    /*
                     * If the symbol is not found and the
                     * reference was not to a weak symbol,
                     * report an error.  Weak references
                     * may be unresolved.
                     */
                    if (symdef == 0) {
                        if (sl.sl_bind != STB_WEAK) {
                            if (elf_reloc_error(lmp, name,
                                rel, binfo))
                                continue;

                            ret = 0;
                            break;

                        } else {
                            psymndx = rsymndx;
                            psymdef = 0;

                            DBG_CALL(Dbg_bind_weak(lmp,
                                (Addr)roffset, (Addr)
                                (roffset - basebgn), name));
                            continue;
                        }
                    }

                    /*
                     * If symbol was found in an object
                     * other than the referencing object
                     * then record the binding.
                     */
                    if ((lmp != _lmp) && ((FLAGS1(_lmp) &
                        FL1_RT_NOINIFIN) == 0)) {
                        if (aplist_test(&bound, _lmp,
                            AL_CNT_RELBIND) == 0) {
                            ret = 0;
                            break;
                        }
                    }

                    /*
                     * Calculate the location of definition;
                     * symbol value plus base address of
                     * containing shared object.
                     */
                    if (IS_SIZE(rtype))
                        value = symdef->st_size;
                    else
                        value = symdef->st_value;

                    if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
                        !(IS_SIZE(rtype)) &&
                        (symdef->st_shndx != SHN_ABS) &&
                        (ELF_ST_TYPE(symdef->st_info) !=
                        STT_TLS))
                        value += ADDR(_lmp);

                    /*
                     * Retain this symbol index and the
                     * value in case it can be used for the
                     * subsequent relocations.
                     */
                    if (rtype != R_SPARC_COPY) {
                        psymndx = rsymndx;
                        pvalue = value;
                        pname = name;
                        psymdef = symdef;
                        psymref = symref;
                        plmp = _lmp;
                        pbinfo = binfo;
                    }
                    if ((LIST(_lmp)->lm_tflags |
                        AFLAGS(_lmp)) &
                        LML_TFLG_AUD_SYMBIND) {
                        dsymndx = (((uintptr_t)symdef -
                            (uintptr_t)SYMTAB(_lmp)) /
                            SYMENT(_lmp));
                        value = audit_symbind(lmp, _lmp,
                            symdef, dsymndx, value,
                            &sb_flags);
                    }
                }

                /*
                 * If relocation is PC-relative, subtract
                 * offset address.
                 */
                if (IS_PC_RELATIVE(rtype))
                    value -= roffset;

                /*
                 * Special case TLS relocations.
                 */
                if (rtype == R_SPARC_TLS_DTPMOD32) {
                    /*
                     * Relocation value is the TLS modid.
                     */
                    value = TLSMODID(_lmp);

                } else if (rtype == R_SPARC_TLS_TPOFF32) {
                    if ((value = elf_static_tls(_lmp,
                        symdef, rel, rtype, name, roffset,
                        value)) == 0) {
                        ret = 0;
                        break;
                    }
                }
            }
        } else {
            /*
             * Special cases.
             */
            if (rtype == R_SPARC_REGISTER) {
                /*
                 * A register symbol associated with symbol
                 * index 0 is initialized (i.e. relocated) to
                 * a constant in the r_addend field rather than
                 * to a symbol value.
                 */
                value = 0;

            } else if (rtype == R_SPARC_TLS_DTPMOD32) {
                /*
                 * TLS relocation value is the TLS modid.
                 */
                value = TLSMODID(lmp);
            } else
                value = basebgn;

            name = NULL;
        }

        DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
            M_REL_SHT_TYPE, rel, NULL, 0, name));

        /*
         * Make sure the segment is writable.
         */
        if ((rtype != R_SPARC_REGISTER) &&
            ((mpp->mr_prot & PROT_WRITE) == 0) &&
            ((set_prot(lmp, mpp, 1) == 0) ||
            (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
            ret = 0;
            break;
        }

        /*
         * Call relocation routine to perform required relocation.
         */
        switch (rtype) {
        case R_SPARC_REGISTER:
            /*
             * The v9 ABI 4.2.4 says that system objects may,
             * but are not required to, use register symbols
             * to indicate how they use global registers.  Thus
             * at least %g6, %g7 must be allowed in addition
             * to %g2 and %g3.
             */
            value += reladd;
            if (roffset == STO_SPARC_REGISTER_G1) {
                set_sparc_g1(value);
            } else if (roffset == STO_SPARC_REGISTER_G2) {
                set_sparc_g2(value);
            } else if (roffset == STO_SPARC_REGISTER_G3) {
                set_sparc_g3(value);
            } else if (roffset == STO_SPARC_REGISTER_G4) {
                set_sparc_g4(value);
            } else if (roffset == STO_SPARC_REGISTER_G5) {
                set_sparc_g5(value);
            } else if (roffset == STO_SPARC_REGISTER_G6) {
                set_sparc_g6(value);
            } else if (roffset == STO_SPARC_REGISTER_G7) {
                set_sparc_g7(value);
            } else {
                eprintf(LIST(lmp), ERR_FATAL,
                    MSG_INTL(MSG_REL_BADREG), NAME(lmp),
                    EC_ADDR(roffset));
                ret = 0;
                break;
            }

            DBG_CALL(Dbg_reloc_apply_reg(LIST(lmp), ELF_DBG_RTLD,
                M_MACH, (Xword)roffset, (Xword)value));
            break;
        case R_SPARC_COPY:
            if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
                symdef, _lmp, (const void *)value) == 0)
                ret = 0;
            break;
        case R_SPARC_JMP_SLOT:
            pltndx = ((ulong_t)rel -
                (uintptr_t)JMPREL(lmp)) / relsiz;

            if (FLAGS(lmp) & FLG_RT_FIXED)
                vaddr = 0;
            else
                vaddr = ADDR(lmp);

            if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
                (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
                AUDINFO(lmp)->ai_dynplts) {
                /* Route the plt through the audit glue code. */
                int	fail = 0;
                ulong_t	symndx = (((uintptr_t)symdef -
                    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

                (void) elf_plt_trace_write((caddr_t)vaddr,
                    (Rela *)rel, lmp, _lmp, symdef, symndx,
                    pltndx, (caddr_t)value, sb_flags, &fail);
                if (fail)
                    ret = 0;
            } else {
                /*
                 * Write standard PLT entry to jump directly
                 * to newly bound function.
                 */
                DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
                    ELF_DBG_RTLD, (Xword)roffset,
                    (Xword)value));
                pbtype = elf_plt_write((uintptr_t)vaddr,
                    (uintptr_t)vaddr, (void *)rel, value,
                    pltndx);
            }
            break;
        default:
            value += reladd;

            /*
             * Write the relocation out.  If this relocation is a
             * common basic write, skip the doreloc() engine.
             */
            if ((rtype == R_SPARC_GLOB_DAT) ||
                (rtype == R_SPARC_32)) {
                if (roffset & 0x3) {
                    Conv_inv_buf_t	inv_buf;

                    eprintf(LIST(lmp), ERR_FATAL,
                        MSG_INTL(MSG_REL_NONALIGN),
                        conv_reloc_SPARC_type(rtype,
                        0, &inv_buf),
                        NAME(lmp), demangle(name),
                        EC_OFF(roffset));
                    ret = 0;
                } else
                    *(uint_t *)roffset += value;
            } else {
                if (do_reloc_rtld(rtype, (uchar_t *)roffset,
                    (Xword *)&value, name,
                    NAME(lmp), LIST(lmp)) == 0)
                    ret = 0;
            }

            /*
             * The value now contains the 'bit-shifted' value that
             * was or'ed into memory (this was set by
             * do_reloc_rtld()).
             */
            DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
                (Xword)roffset, (Xword)value));

            /*
             * If this relocation is against a text segment, make
             * sure that the instruction cache is flushed.
             */
            if (textrel)
                iflush_range((caddr_t)roffset, 0x4);
        }

        if ((ret == 0) &&
            ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
            break;

        if (binfo) {
            DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
                (Off)(roffset - basebgn), pltndx, pbtype,
                _lmp, (Addr)value, symdef->st_value, name, binfo));
        }
    }

    return (relocate_finish(lmp, bound, ret));
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we insure that
 * the data tables/strings for all known machine versions aren't dragged into
 * ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
    static Conv_inv_buf_t	inv_buf;

    return (conv_reloc_SPARC_type(rel, 0, &inv_buf));
}