xref: /freebsd/libexec/rtld-elf/powerpc64/reloc.c (revision 87b759f0fa1f7554d50ce640c40138512bbded44)
/*      $NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $   */

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 1998   Tsubai Masanari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include "debug.h"
#include "rtld.h"

#if !defined(_CALL_ELF) || _CALL_ELF == 1
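/*
 * With the ELFv1 ABI a function pointer does not point at code but at a
 * function descriptor: three doublewords holding the entry address, the
 * TOC pointer to load into r2, and an environment pointer.  rtld copies
 * and fixes up these descriptors when it binds PLT entries.
 */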
struct funcdesc {
	Elf_Addr addr;
	Elf_Addr toc;
	Elf_Addr env;
};
#endif

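/*
 * Digest powerpc64-specific dynamic tags.  DT_PPC64_GLINK records the
 * run-time address of the object's .glink PLT stub area, which
 * reloc_plt_object() uses below to point unresolved PLT slots at their
 * lazy-binding stubs.
 */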
bool
arch_digest_dynamic(struct Struct_Obj_Entry *obj, const Elf_Dyn *dynp)
{
	if (dynp->d_tag == DT_PPC64_GLINK) {
		obj->glink = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
		return (true);
	}

	return (false);
}

/*
 * Process the R_PPC_COPY relocations
 */
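/*
 * A COPY relocation directs rtld to copy the initialized data of a symbol
 * defined in a shared object into space reserved for it in the main
 * program, so that absolute references from the executable resolve to the
 * copy.
 */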
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *) dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela;  rela < relalim;  rela++) {
		void *dstaddr;
		const Elf_Sym *dstsym;
		const char *name;
		size_t size;
		const void *srcaddr;
		const Elf_Sym *srcsym = NULL;
		const Obj_Entry *srcobj, *defobj;
		SymLook req;
		int res;

		if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
			continue;
		}

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;
		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		     srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}

		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" "
				    "referenced from COPY "
				    "relocation in %s", name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
		dbg("copy_reloc: src=%p, dst=%p, size=%zu", srcaddr, dstaddr, size);
	}

	return (0);
}


/*
 * Perform early relocation of the run-time linker image
 */
void
reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rela *rela = NULL, *relalim;
	Elf_Addr relasz = 0;
	Elf_Addr *where;

	/*
	 * Extract the rela/relasz values from the dynamic section
	 */
	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_RELA:
			rela = (const Elf_Rela *)(relocbase+dynp->d_un.d_ptr);
			break;
		case DT_RELASZ:
			relasz = dynp->d_un.d_val;
			break;
		}
	}

	/*
	 * Relocate these values
	 */
	relalim = (const Elf_Rela *)((const char *)rela + relasz);
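	/*
	 * rtld cannot look up symbols this early, so its own startup
	 * relocations are expected to be base-relative (R_PPC_RELATIVE
	 * style): each entry is simply relocbase plus the addend.
	 */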
	for (; rela < relalim; rela++) {
		where = (Elf_Addr *)(relocbase + rela->r_offset);
		*where = (Elf_Addr)(relocbase + rela->r_addend);
	}
}


/*
 * Relocate a non-PLT object with addend.
 */
static int
reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
    const Elf_Rela *rela, SymCache *cache, int flags, RtldLockState *lockstate)
{
	const Elf_Sym	*def = NULL;
	const Obj_Entry	*defobj;
	Elf_Addr	*where, symval = 0;

	/*
	 * First, resolve the symbol for those relocation types
	 * that reference one.
	 */
	switch (ELF_R_TYPE(rela->r_info)) {

	case R_PPC64_UADDR64:    /* doubleword64 S + A */
	case R_PPC64_ADDR64:
	case R_PPC_GLOB_DAT:
	case R_PPC64_DTPMOD64:
	case R_PPC64_TPREL64:
	case R_PPC64_DTPREL64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL) {
			return (-1);
		}
		/*
		 * If symbol is IFUNC, only perform relocation
		 * when caller allowed it by passing
		 * SYMLOOK_IFUNC flag.  Skip the relocations
		 * otherwise.
		 *
		 * Also error out in case IFUNC relocations
		 * are specified for TLS, which cannot be
		 * usefully interpreted.
		 */
		if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
			switch (ELF_R_TYPE(rela->r_info)) {
			case R_PPC64_UADDR64:
			case R_PPC64_ADDR64:
			case R_PPC_GLOB_DAT:
				if ((flags & SYMLOOK_IFUNC) == 0) {
					dbg("Non-PLT reference to IFUNC found!");
					obj->non_plt_gnu_ifunc = true;
					return (0);
				}
				symval = (Elf_Addr)rtld_resolve_ifunc(
					defobj, def);
				break;
			default:
				_rtld_error("%s: IFUNC for TLS reloc",
					 obj->path);
				return (-1);
			}
		} else {
			if ((flags & SYMLOOK_IFUNC) != 0)
				return (0);
			symval = (Elf_Addr)defobj->relocbase +
				def->st_value;
		}
		break;
	default:
		if ((flags & SYMLOOK_IFUNC) != 0)
			return (0);
	}

	where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_PPC_NONE:
		break;
	case R_PPC64_UADDR64:
	case R_PPC64_ADDR64:
	case R_PPC_GLOB_DAT:
		/* Don't issue write if unnecessary; avoid COW page fault */
		if (*where != symval + rela->r_addend) {
			*where = symval + rela->r_addend;
		}
		break;
	case R_PPC64_DTPMOD64:
		*where = (Elf_Addr) defobj->tlsindex;
		break;
	case R_PPC64_TPREL64:
		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_static) {
			if (!allocate_tls_offset(
				    __DECONST(Obj_Entry *, defobj))) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return (-1);
			}
		}

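		/*
		 * Convert the symbol's location in the object's static TLS
		 * block into an offset relative to the thread pointer;
		 * TLS_TP_OFFSET and TLS_TCB_SIZE account for the bias the
		 * powerpc64 TLS ABI applies to the thread pointer.
		 */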
		*(Elf_Addr **)where = *where * sizeof(Elf_Addr)
		    + (Elf_Addr *)(def->st_value + rela->r_addend
		    + defobj->tlsoffset - TLS_TP_OFFSET - TLS_TCB_SIZE);
		break;
	case R_PPC64_DTPREL64:
		*where += (Elf_Addr)(def->st_value + rela->r_addend
		    - TLS_DTV_OFFSET);
		break;
	case R_PPC_RELATIVE:  /* doubleword64 B + A */
		symval = (Elf_Addr)(obj->relocbase + rela->r_addend);

		/* As above, don't issue write unnecessarily */
		if (*where != symval) {
			*where = symval;
		}
		break;
	case R_PPC_COPY:
		/*
		 * These are deferred until all other relocations
		 * have been done.  All we do here is make sure
		 * that the COPY relocation is not in a shared
		 * library.  They are allowed only in executable
		 * files.
		 */
		if (!obj->mainprog) {
			_rtld_error("%s: Unexpected R_COPY "
				    "relocation in shared library",
				    obj->path);
			return (-1);
		}
		break;
	case R_PPC_IRELATIVE:
		/*
		 * These will be handled by reloc_iresolve().
		 */
		obj->irelative = true;
		break;
	case R_PPC_JMP_SLOT:
		/*
		 * These will be handled by the plt/jmpslot routines
		 */
		break;

	default:
		_rtld_error("%s: Unsupported relocation type %ld"
			    " in non-PLT relocations\n", obj->path,
			    ELF_R_TYPE(rela->r_info));
		return (-1);
	}
	return (0);
}


/*
 * Process non-PLT relocations
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Phdr *phdr;
	SymCache *cache;
	int bytes = obj->dynsymcount * sizeof(SymCache);
	int r = -1;

	/*
	 * The dynamic loader may be called from a thread; we have
	 * limited amounts of stack available, so we cannot use alloca().
	 */
	if (obj != obj_rtld) {
		cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
		    -1, 0);
		if (cache == MAP_FAILED)
			cache = NULL;
	} else
		cache = NULL;

	/*
	 * From the SVR4 PPC ABI:
	 * "The PowerPC family uses only the Elf32_Rela relocation
	 *  entries with explicit addends."
	 */
	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		if (reloc_nonplt_object(obj_rtld, obj, rela, cache, flags,
		    lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache)
		munmap(cache, bytes);

	/*
	 * Synchronize icache for executable segments in case we made
	 * any changes.
	 */
	for (phdr = obj->phdr;
	    (const char *)phdr < (const char *)obj->phdr + obj->phsize;
	    phdr++) {
		if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_X) != 0) {
			__syncicache(obj->relocbase + phdr->p_vaddr,
			    phdr->p_memsz);
		}
	}

	return (r);
}


/*
 * Initialise a PLT slot to the resolving trampoline
 */
static int
reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
{
	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	long reloff;

	reloff = rela - obj->pltrela;

	dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx", (void *)where,
	    reloff, obj->glink);

#if !defined(_CALL_ELF) || _CALL_ELF == 1
	/*
	 * Glink stubs are 2 instructions (8 bytes) each for the first
	 * 32k entries and 3 instructions (12 bytes) each after that,
	 * following a 32-byte header.
	 */
	*where = (Elf_Addr)obj->glink + 32 +
	    8*((reloff < 0x8000) ? reloff : 0x8000) +
	    12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
#else
	/* 64-Bit ELF V2 ABI Specification, sec. 4.2.5.3. */
	*where = (Elf_Addr)obj->glink + 4*reloff + 32;
#endif
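	/*
	 * The slot now points at this symbol's stub in .glink.  Until the
	 * symbol is resolved, a call through the slot runs the stub, which
	 * funnels into the common glink code and from there into
	 * _rtld_bind_start as set up by init_pltgot().
	 */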

	return (0);
}

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	if (obj->pltrelasize != 0) {
		relalim = (const Elf_Rela *)((const char *)obj->pltrela +
		    obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {

#if defined(_CALL_ELF) && _CALL_ELF == 2
			if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
				dbg("ABI violation - found IRELATIVE in the PLT.");
				obj->irelative = true;
				continue;
			}
#endif
			/*
			 * PowerPC(64) .rela.plt is composed of an array of
			 * R_PPC_JMP_SLOT relocations. Unlike other platforms,
			 * this is the ONLY relocation type that is valid here.
			 */
			assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);

			if (reloc_plt_object(obj, rela) < 0) {
				return (-1);
			}
		}
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		/* This isn't actually a jump slot, ignore it. */
		if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE)
			continue;
		assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);

		if (def == &sym_zero) {
			/* Zero undefined weak symbols */
#if !defined(_CALL_ELF) || _CALL_ELF == 1
			bzero(where, sizeof(struct funcdesc));
#else
			*where = 0;
#endif
		} else {
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				/* LD_BIND_NOW, ifunc in shared lib. */
				obj->gnu_ifunc = true;
				continue;
			}
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *) rela);
		}
	}

	obj->jmpslots_done = true;

	return (0);
}


/*
 * Update the value of a PLT jump slot.
 */
Elf_Addr
reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj __unused,
    const Obj_Entry *obj __unused, const Elf_Rel *rel __unused)
{

	/*
	 * Fill in the PLT slot pointed at by `wherep' with the now
	 * fully resolved function address (ELFv2) or function
	 * descriptor (ELFv1).
	 */

#if !defined(_CALL_ELF) || _CALL_ELF == 1
	dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
	    (void *)wherep, (void *)target, *(Elf_Addr *)target,
	    (Elf_Addr)defobj->relocbase);

	if (ld_bind_not)
		goto out;

	/*
	 * For the trampoline, the second two elements of the function
	 * descriptor are unused, so we are fine replacing those at any time
	 * with the real ones with no thread safety implications. However, we
	 * need to make sure the main entry point pointer ([0]) is seen to be
	 * modified *after* the second two elements. This can't be done in
	 * general, since there are no barriers in the reading code, but put in
	 * some isyncs to at least make it a little better.
	 */
	wherep[2] = ((Elf_Addr *)target)[2];
	wherep[1] = ((Elf_Addr *)target)[1];
	__asm __volatile ("isync" : : : "memory");
	wherep[0] = ((Elf_Addr *)target)[0];
	__asm __volatile ("isync" : : : "memory");

	if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
		/*
		 * It is possible (LD_BIND_NOW) that the function
		 * descriptor we are copying has not yet been relocated.
		 * If this happens, fix it. Don't worry about threading in
		 * this case since LD_BIND_NOW makes it irrelevant.
		 */

		((struct funcdesc *)(wherep))->addr +=
		    (Elf_Addr)defobj->relocbase;
		((struct funcdesc *)(wherep))->toc +=
		    (Elf_Addr)defobj->relocbase;
	}
#else
	dbg(" reloc_jmpslot: where=%p, target=%p", (void *)wherep,
	    (void *)target);

	assert(target >= (Elf_Addr)defobj->relocbase);

	if (ld_bind_not)
		goto out;

	if (*wherep != target)
		*wherep = target;

#endif
out:

	return (target);
}

int
reloc_iresolve(Obj_Entry *obj,
    struct Struct_RtldLockState *lockstate)
{
	/*
	 * Since PLT slots on PowerPC64 are always R_PPC_JMP_SLOT,
	 * R_PPC_IRELATIVE is in RELA.
	 */
#if !defined(_CALL_ELF) || _CALL_ELF == 1
	(void)(obj);
	(void)(lockstate);
	/* XXX not implemented */
	return (0);
#else
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	Elf_Addr *where, target, *ptr;

	if (!obj->irelative)
		return (0);

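	/*
	 * Each R_PPC_IRELATIVE entry names an ifunc resolver located at
	 * relocbase + addend.  The bind lock is dropped around the call
	 * because the resolver is arbitrary application code.
	 */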
	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
			ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

			lock_release(rtld_bind_lock, lockstate);
			target = call_ifunc_resolver(ptr);
			wlock_acquire(rtld_bind_lock, lockstate);

			*where = target;
		}
	}
	/*
	 * XXX Remove me when lld is fixed!
	 * LLD currently makes illegal relocations in the PLT.
	 */
	relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
			ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

			lock_release(rtld_bind_lock, lockstate);
			target = call_ifunc_resolver(ptr);
			wlock_acquire(rtld_bind_lock, lockstate);

			*where = target;
		}
	}

	obj->irelative = false;
	return (0);
#endif
}

int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
    struct Struct_RtldLockState *lockstate __unused)
{
#if !defined(_CALL_ELF) || _CALL_ELF == 1
	_rtld_error("reloc_gnu_ifunc(): Not implemented!");
	/* XXX not implemented */
	return (-1);
#else

	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	Elf_Addr *where, target;
	const Elf_Sym *def;
	const Obj_Entry *defobj;

	if (!obj->gnu_ifunc)
		return (0);
	relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT) {
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
			    SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
				continue;
			lock_release(rtld_bind_lock, lockstate);
			target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
			wlock_acquire(rtld_bind_lock, lockstate);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
		}
	}
	obj->gnu_ifunc = false;
	return (0);
#endif
}

int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{
	return (0);
}

void
init_pltgot(Obj_Entry *obj)
{
	Elf_Addr *pltcall;

	pltcall = obj->pltgot;

	if (pltcall == NULL) {
		return;
	}

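	/*
	 * Seed the PLT resolver linkage: with ELFv2, the first two
	 * doublewords hold the address of _rtld_bind_start and the
	 * Obj_Entry pointer it needs; with ELFv1, the function descriptor
	 * of _rtld_bind_start is copied in and the descriptor's third
	 * (environment) word carries the Obj_Entry pointer.
	 */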
#if defined(_CALL_ELF) && _CALL_ELF == 2
	pltcall[0] = (Elf_Addr)&_rtld_bind_start;
	pltcall[1] = (Elf_Addr)obj;
#else
	memcpy(pltcall, _rtld_bind_start, sizeof(struct funcdesc));
	pltcall[2] = (Elf_Addr)obj;
#endif
}

/*
 * Actual values are 32 bit.
 */
u_long cpu_features;
u_long cpu_features2;

void
powerpc64_abi_variant_hook(Elf_Auxinfo** aux_info)
{
	/*
	 * Since aux_info[] is easier to work with than aux, go ahead and
	 * initialize cpu_features / cpu_features2.
	 */
	cpu_features = -1UL;
	cpu_features2 = -1UL;
	if (aux_info[AT_HWCAP] != NULL)
		cpu_features = (uint32_t)aux_info[AT_HWCAP]->a_un.a_val;
	if (aux_info[AT_HWCAP2] != NULL)
		cpu_features2 = (uint32_t)aux_info[AT_HWCAP2]->a_un.a_val;
}

void
ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)] __unused)
{

}

void
allocate_initial_tls(Obj_Entry *list)
{

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */

	tls_static_space = tls_last_offset + tls_last_size +
	    ld_static_tls_extra;

	_tcb_set(allocate_tls(list, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}

void*
__tls_get_addr(tls_index* ti)
{
	uintptr_t **dtvp;
	char *p;

	dtvp = &_tcb_get()->tcb_dtv;
	p = tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset);

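	/*
	 * R_PPC64_DTPREL64 (above) subtracted TLS_DTV_OFFSET when the
	 * offset was stored, so add it back before handing out the
	 * address.
	 */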
	return (p + TLS_DTV_OFFSET);
}