xref: /titanic_50/usr/src/cmd/sgs/libld/common/machrel.amd.c (revision 2df1fe9ca32bb227b9158c67f5c00b54c20b10fd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include	<string.h>
29 #include	<stdio.h>
30 #include	<strings.h>
31 #include	<sys/elf_amd64.h>
32 #include	<debug.h>
33 #include	<reloc.h>
34 #include	"msg.h"
35 #include	"_libld.h"
36 
37 Word
38 ld_init_rel(Rel_desc *reld, void *reloc)
39 {
40 	Rela *	rel = (Rela *)reloc;
41 
42 	/* LINTED */
43 	reld->rel_rtype = (Word)ELF_R_TYPE(rel->r_info);
44 	reld->rel_roffset = rel->r_offset;
45 	reld->rel_raddend = rel->r_addend;
46 	reld->rel_typedata = 0;
47 
48 	reld->rel_flags |= FLG_REL_RELA;
49 
50 	return ((Word)ELF_R_SYM(rel->r_info));
51 }
52 
53 void
54 ld_mach_eflags(Ehdr *ehdr, Ofl_desc *ofl)
55 {
56 	ofl->ofl_dehdr->e_flags |= ehdr->e_flags;
57 }
58 
59 void
60 ld_mach_make_dynamic(Ofl_desc *ofl, size_t *cnt)
61 {
62 	if (!(ofl->ofl_flags & FLG_OF_RELOBJ)) {
63 		/*
64 		 * Create this entry if we are going to create a PLT table.
65 		 */
66 		if (ofl->ofl_pltcnt)
67 			(*cnt)++;		/* DT_PLTGOT */
68 	}
69 }
70 
71 void
72 ld_mach_update_odynamic(Ofl_desc *ofl, Dyn **dyn)
73 {
74 	if (((ofl->ofl_flags & FLG_OF_RELOBJ) == 0) && ofl->ofl_pltcnt) {
75 		(*dyn)->d_tag = DT_PLTGOT;
76 		if (ofl->ofl_osgot)
77 			(*dyn)->d_un.d_ptr = ofl->ofl_osgot->os_shdr->sh_addr;
78 		else
79 			(*dyn)->d_un.d_ptr = 0;
80 		(*dyn)++;
81 	}
82 }
83 
84 Xword
85 ld_calc_plt_addr(Sym_desc *sdp, Ofl_desc *ofl)
86 {
87 	Xword	value;
88 
89 	value = (Xword)(ofl->ofl_osplt->os_shdr->sh_addr) +
90 	    M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) * M_PLT_ENTSIZE);
91 	return (value);
92 }
93 
94 /*
95  *  Build a single plt entry - code is:
96  *	JMP	*name1@GOTPCREL(%rip)
97  *	PUSHL	$index
98  *	JMP	.PLT0
99  */
100 static uchar_t pltn_entry[M_PLT_ENTSIZE] = {
101 /* 0x00 jmpq *name1@GOTPCREL(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
102 /* 0x06 pushq $index */			0x68, 0x00, 0x00, 0x00, 0x00,
103 /* 0x0b jmpq  .plt0(%rip) */		0xe9, 0x00, 0x00, 0x00, 0x00
104 /* 0x10 */
105 };
106 
107 static uintptr_t
108 plt_entry(Ofl_desc * ofl, Sym_desc * sdp)
109 {
110 	uchar_t		*plt0, *pltent, *gotent;
111 	Sword		plt_off;
112 	Word		got_off;
113 	Xword		val1;
114 	int		bswap;
115 
116 	got_off = sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
117 	plt_off = M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) *
118 	    M_PLT_ENTSIZE);
119 	plt0 = (uchar_t *)(ofl->ofl_osplt->os_outdata->d_buf);
120 	pltent = plt0 + plt_off;
121 	gotent = (uchar_t *)(ofl->ofl_osgot->os_outdata->d_buf) + got_off;
122 
123 	bcopy(pltn_entry, pltent, sizeof (pltn_entry));
124 	/*
125 	 * Fill in the got entry with the address of the next instruction.
126 	 */
127 	/* LINTED */
128 	*(Word *)gotent = ofl->ofl_osplt->os_shdr->sh_addr + plt_off +
129 	    M_PLT_INSSIZE;
130 
131 	/*
132 	 * If '-z noreloc' is specified - skip the do_reloc_ld
133 	 * stage.
134 	 */
135 	if (!OFL_DO_RELOC(ofl))
136 		return (1);
137 
138 	/*
139 	 * If the running linker has a different byte order than
140 	 * the target host, tell do_reloc_ld() to swap bytes.
141 	 *
142 	 * We know the PLT is PROGBITS --- we don't have to check
143 	 */
144 	bswap = (ofl->ofl_flags1 & FLG_OF1_ENCDIFF) != 0;
145 
146 	/*
147 	 * patchup:
148 	 *	jmpq	*name1@gotpcrel(%rip)
149 	 *
150 	 * NOTE: 0x06 represents next instruction.
151 	 */
152 	val1 = (ofl->ofl_osgot->os_shdr->sh_addr + got_off) -
153 	    (ofl->ofl_osplt->os_shdr->sh_addr + plt_off) - 0x06;
154 
155 	if (do_reloc_ld(R_AMD64_GOTPCREL, &pltent[0x02],
156 	    &val1, MSG_ORIG(MSG_SYM_PLTENT),
157 	    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
158 		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
159 		    sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
160 		return (S_ERROR);
161 	}
162 
163 	/*
164 	 * patchup:
165 	 *	pushq	$pltndx
166 	 */
167 	val1 = (Xword)(sdp->sd_aux->sa_PLTndx - 1);
168 
169 	if (do_reloc_ld(R_AMD64_32, &pltent[0x07],
170 	    &val1, MSG_ORIG(MSG_SYM_PLTENT),
171 	    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
172 		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
173 		    sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
174 		return (S_ERROR);
175 	}
176 
177 	/*
178 	 * patchup:
179 	 *	jmpq	.plt0(%rip)
180 	 * NOTE: 0x10 represents next instruction. The rather complex
181 	 * series of casts is necessary to sign extend an offset into
182 	 * a 64-bit value while satisfying various compiler error
183 	 * checks.  Handle with care.
184 	 */
185 	val1 = (Xword)((intptr_t)((uintptr_t)plt0 -
186 	    (uintptr_t)(&pltent[0x10])));
187 
188 	if (do_reloc_ld(R_AMD64_PC32, &pltent[0x0c],
189 	    &val1, MSG_ORIG(MSG_SYM_PLTENT),
190 	    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
191 		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
192 		    sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
193 		return (S_ERROR);
194 	}
195 
196 	return (1);
197 }
198 
199 uintptr_t
200 ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl)
201 {
202 	Os_desc *	relosp, * osp = 0;
203 	Word		ndx;
204 	Xword		roffset, value;
205 	Sxword		raddend;
206 	Rela		rea;
207 	char		*relbits;
208 	Sym_desc *	sdp, * psym = (Sym_desc *)0;
209 	int		sectmoved = 0;
210 
211 	raddend = orsp->rel_raddend;
212 	sdp = orsp->rel_sym;
213 
214 	/*
215 	 * If the section this relocation is against has been discarded
216 	 * (-zignore), then also discard (skip) the relocation itself.
217 	 */
218 	if (orsp->rel_isdesc && ((orsp->rel_flags &
219 	    (FLG_REL_GOT | FLG_REL_BSS | FLG_REL_PLT | FLG_REL_NOINFO)) == 0) &&
220 	    (orsp->rel_isdesc->is_flags & FLG_IS_DISCARD)) {
221 		DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml, M_MACH, orsp));
222 		return (1);
223 	}
224 
225 	/*
226 	 * If this is a relocation against a move table, or expanded move
227 	 * table, adjust the relocation entries.
228 	 */
229 	if (orsp->rel_move)
230 		ld_adj_movereloc(ofl, orsp);
231 
232 	/*
233 	 * If this is a relocation against a section then we need to adjust the
234 	 * raddend field to compensate for the new position of the input section
235 	 * within the new output section.
236 	 */
237 	if (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION) {
238 		if (ofl->ofl_parsym.head &&
239 		    (sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
240 		    /* LINTED */
241 		    (psym = ld_am_I_partial(orsp, orsp->rel_raddend))) {
242 			DBG_CALL(Dbg_move_outsctadj(ofl->ofl_lml, psym));
243 			sectmoved = 1;
244 			if (ofl->ofl_flags & FLG_OF_RELOBJ)
245 				raddend = psym->sd_sym->st_value;
246 			else
247 				raddend = psym->sd_sym->st_value -
248 				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
249 			/* LINTED */
250 			raddend += (Off)_elf_getxoff(psym->sd_isc->is_indata);
251 			if (psym->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
252 				raddend +=
253 				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
254 		} else {
255 			/* LINTED */
256 			raddend += (Off)_elf_getxoff(sdp->sd_isc->is_indata);
257 			if (sdp->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
258 				raddend +=
259 				    sdp->sd_isc->is_osdesc->os_shdr->sh_addr;
260 		}
261 	}
262 
263 	value = sdp->sd_sym->st_value;
264 
265 	if (orsp->rel_flags & FLG_REL_GOT) {
266 		/*
267 		 * Note: for GOT relative relocations on amd64
268 		 *	 we discard the addend.  It was relevant
269 		 *	 to the reference - not to the data item
270 		 *	 being referenced (ie: that -4 thing).
271 		 */
272 		raddend = 0;
273 		osp = ofl->ofl_osgot;
274 		roffset = ld_calc_got_offset(orsp, ofl);
275 
276 	} else if (orsp->rel_flags & FLG_REL_PLT) {
277 		/*
278 		 * Note that relocations for PLT's actually
279 		 * cause a relocation againt the GOT.
280 		 */
281 		osp = ofl->ofl_osplt;
282 		roffset = (ofl->ofl_osgot->os_shdr->sh_addr) +
283 		    sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
284 		raddend = 0;
285 		if (plt_entry(ofl, sdp) == S_ERROR)
286 			return (S_ERROR);
287 
288 	} else if (orsp->rel_flags & FLG_REL_BSS) {
289 		/*
290 		 * This must be a R_AMD64_COPY.  For these set the roffset to
291 		 * point to the new symbols location.
292 		 */
293 		osp = ofl->ofl_isbss->is_osdesc;
294 		roffset = value;
295 
296 		/*
297 		 * The raddend doesn't mean anything in a R_SPARC_COPY
298 		 * relocation.  Null it out because it can confuse people.
299 		 */
300 		raddend = 0;
301 	} else {
302 		osp = orsp->rel_osdesc;
303 
304 		/*
305 		 * Calculate virtual offset of reference point; equals offset
306 		 * into section + vaddr of section for loadable sections, or
307 		 * offset plus section displacement for nonloadable sections.
308 		 */
309 		roffset = orsp->rel_roffset +
310 		    (Off)_elf_getxoff(orsp->rel_isdesc->is_indata);
311 		if (!(ofl->ofl_flags & FLG_OF_RELOBJ))
312 			roffset += orsp->rel_isdesc->is_osdesc->
313 			    os_shdr->sh_addr;
314 	}
315 
316 	if ((osp == 0) || ((relosp = osp->os_relosdesc) == 0))
317 		relosp = ofl->ofl_osrel;
318 
319 	/*
320 	 * Assign the symbols index for the output relocation.  If the
321 	 * relocation refers to a SECTION symbol then it's index is based upon
322 	 * the output sections symbols index.  Otherwise the index can be
323 	 * derived from the symbols index itself.
324 	 */
325 	if (orsp->rel_rtype == R_AMD64_RELATIVE)
326 		ndx = STN_UNDEF;
327 	else if ((orsp->rel_flags & FLG_REL_SCNNDX) ||
328 	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION)) {
329 		if (sectmoved == 0) {
330 			/*
331 			 * Check for a null input section. This can
332 			 * occur if this relocation references a symbol
333 			 * generated by sym_add_sym().
334 			 */
335 			if ((sdp->sd_isc != 0) &&
336 			    (sdp->sd_isc->is_osdesc != 0))
337 				ndx = sdp->sd_isc->is_osdesc->os_scnsymndx;
338 			else
339 				ndx = sdp->sd_shndx;
340 		} else
341 			ndx = ofl->ofl_sunwdata1ndx;
342 	} else
343 		ndx = sdp->sd_symndx;
344 
345 	/*
346 	 * Add the symbols 'value' to the addend field.
347 	 */
348 	if (orsp->rel_flags & FLG_REL_ADVAL)
349 		raddend += value;
350 
351 	/*
352 	 * The addend field for R_AMD64_DTPMOD64 means nothing.  The addend
353 	 * is propagated in the corresponding R_AMD64_DTPOFF64 relocation.
354 	 */
355 	if (orsp->rel_rtype == R_AMD64_DTPMOD64)
356 		raddend = 0;
357 
358 	relbits = (char *)relosp->os_outdata->d_buf;
359 
360 	rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
361 	rea.r_offset = roffset;
362 	rea.r_addend = raddend;
363 	DBG_CALL(Dbg_reloc_out(ofl, ELF_DBG_LD, SHT_RELA, &rea, relosp->os_name,
364 	    orsp->rel_sname));
365 
366 	/*
367 	 * Assert we haven't walked off the end of our relocation table.
368 	 */
369 	assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);
370 
371 	(void) memcpy((relbits + relosp->os_szoutrels),
372 	    (char *)&rea, sizeof (Rela));
373 	relosp->os_szoutrels += (Xword)sizeof (Rela);
374 
375 	/*
376 	 * Determine if this relocation is against a non-writable, allocatable
377 	 * section.  If so we may need to provide a text relocation diagnostic.
378 	 * Note that relocations against the .plt (R_AMD64_JUMP_SLOT) actually
379 	 * result in modifications to the .got.
380 	 */
381 	if (orsp->rel_rtype == R_AMD64_JUMP_SLOT)
382 		osp = ofl->ofl_osgot;
383 
384 	ld_reloc_remain_entry(orsp, osp, ofl);
385 	return (1);
386 }
387 
388 /*
389  * amd64 Instructions for TLS processing
390  */
391 static uchar_t tlsinstr_gd_ie[] = {
392 	/*
393 	 *	0x00 movq %fs:0, %rax
394 	 */
395 	0x64, 0x48, 0x8b, 0x04, 0x25,
396 	0x00, 0x00, 0x00, 0x00,
397 	/*
398 	 *	0x09 addq x@gottpoff(%rip), %rax
399 	 */
400 	0x48, 0x03, 0x05, 0x00, 0x00,
401 	0x00, 0x00
402 };
403 
404 static uchar_t tlsinstr_gd_le[] = {
405 	/*
406 	 *	0x00 movq %fs:0, %rax
407 	 */
408 	0x64, 0x48, 0x8b, 0x04, 0x25,
409 	0x00, 0x00, 0x00, 0x00,
410 	/*
411 	 *	0x09 leaq x@gottpoff(%rip), %rax
412 	 */
413 	0x48, 0x8d, 0x80, 0x00, 0x00,
414 	0x00, 0x00
415 };
416 
417 static uchar_t tlsinstr_ld_le[] = {
418 	/*
419 	 * .byte 0x66
420 	 */
421 	0x66,
422 	/*
423 	 * .byte 0x66
424 	 */
425 	0x66,
426 	/*
427 	 * .byte 0x66
428 	 */
429 	0x66,
430 	/*
431 	 * movq %fs:0, %rax
432 	 */
433 	0x64, 0x48, 0x8b, 0x04, 0x25,
434 	0x00, 0x00, 0x00, 0x00
435 };
436 
437 
438 static Fixupret
439 tls_fixups(Ofl_desc *ofl, Rel_desc *arsp)
440 {
441 	Sym_desc	*sdp = arsp->rel_sym;
442 	Word		rtype = arsp->rel_rtype;
443 	uchar_t		*offset;
444 
445 	offset = (uchar_t *)((uintptr_t)arsp->rel_roffset +
446 	    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->is_indata) +
447 	    (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf);
448 
449 	if (sdp->sd_ref == REF_DYN_NEED) {
450 		/*
451 		 * IE reference model
452 		 */
453 		switch (rtype) {
454 		case R_AMD64_TLSGD:
455 			/*
456 			 *  GD -> IE
457 			 *
458 			 * Transition:
459 			 *	0x00 .byte 0x66
460 			 *	0x01 leaq x@tlsgd(%rip), %rdi
461 			 *	0x08 .word 0x6666
462 			 *	0x0a rex64
463 			 *	0x0b call __tls_get_addr@plt
464 			 *	0x10
465 			 * To:
466 			 *	0x00 movq %fs:0, %rax
467 			 *	0x09 addq x@gottpoff(%rip), %rax
468 			 *	0x10
469 			 */
470 			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
471 			    R_AMD64_GOTTPOFF, arsp));
472 			arsp->rel_rtype = R_AMD64_GOTTPOFF;
473 			arsp->rel_roffset += 8;
474 			arsp->rel_raddend = (Sxword)-4;
475 
476 			/*
477 			 * Adjust 'offset' to beginning of instruction
478 			 * sequence.
479 			 */
480 			offset -= 4;
481 			(void) memcpy(offset, tlsinstr_gd_ie,
482 			    sizeof (tlsinstr_gd_ie));
483 			return (FIX_RELOC);
484 
485 		case R_AMD64_PLT32:
486 			/*
487 			 * Fixup done via the TLS_GD relocation.
488 			 */
489 			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
490 			    R_AMD64_NONE, arsp));
491 			return (FIX_DONE);
492 		}
493 	}
494 
495 	/*
496 	 * LE reference model
497 	 */
498 	switch (rtype) {
499 	case R_AMD64_TLSGD:
500 		/*
501 		 * GD -> LE
502 		 *
503 		 * Transition:
504 		 *	0x00 .byte 0x66
505 		 *	0x01 leaq x@tlsgd(%rip), %rdi
506 		 *	0x08 .word 0x6666
507 		 *	0x0a rex64
508 		 *	0x0b call __tls_get_addr@plt
509 		 *	0x10
510 		 * To:
511 		 *	0x00 movq %fs:0, %rax
512 		 *	0x09 leaq x@tpoff(%rax), %rax
513 		 *	0x10
514 		 */
515 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
516 		    R_AMD64_TPOFF32, arsp));
517 		arsp->rel_rtype = R_AMD64_TPOFF32;
518 		arsp->rel_roffset += 8;
519 		arsp->rel_raddend = 0;
520 
521 		/*
522 		 * Adjust 'offset' to beginning of instruction sequence.
523 		 */
524 		offset -= 4;
525 		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
526 		return (FIX_RELOC);
527 
528 	case R_AMD64_GOTTPOFF:
529 		/*
530 		 * IE -> LE
531 		 *
532 		 * Transition:
533 		 *	0x00 movq %fs:0, %rax
534 		 *	0x09 addq x@gottopoff(%rip), %rax
535 		 *	0x10
536 		 * To:
537 		 *	0x00 movq %fs:0, %rax
538 		 *	0x09 leaq x@tpoff(%rax), %rax
539 		 *	0x10
540 		 */
541 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
542 		    R_AMD64_TPOFF32, arsp));
543 		arsp->rel_rtype = R_AMD64_TPOFF32;
544 		arsp->rel_raddend = 0;
545 
546 		/*
547 		 * Adjust 'offset' to beginning of instruction sequence.
548 		 */
549 		offset -= 12;
550 
551 		/*
552 		 * Same code sequence used in the GD -> LE transition.
553 		 */
554 		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
555 		return (FIX_RELOC);
556 
557 	case R_AMD64_TLSLD:
558 		/*
559 		 * LD -> LE
560 		 *
561 		 * Transition
562 		 *	0x00 leaq x1@tlsgd(%rip), %rdi
563 		 *	0x07 call __tls_get_addr@plt
564 		 *	0x0c
565 		 * To:
566 		 *	0x00 .byte 0x66
567 		 *	0x01 .byte 0x66
568 		 *	0x02 .byte 0x66
569 		 *	0x03 movq %fs:0, %rax
570 		 */
571 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
572 		    R_AMD64_NONE, arsp));
573 		offset -= 3;
574 		(void) memcpy(offset, tlsinstr_ld_le, sizeof (tlsinstr_ld_le));
575 		return (FIX_DONE);
576 
577 	case R_AMD64_DTPOFF32:
578 		/*
579 		 * LD->LE
580 		 *
581 		 * Transition:
582 		 *	0x00 leaq x1@dtpoff(%rax), %rcx
583 		 * To:
584 		 *	0x00 leaq x1@tpoff(%rax), %rcx
585 		 */
586 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
587 		    R_AMD64_TPOFF32, arsp));
588 		arsp->rel_rtype = R_AMD64_TPOFF32;
589 		arsp->rel_raddend = 0;
590 		return (FIX_RELOC);
591 	}
592 
593 	return (FIX_RELOC);
594 }
595 
596 uintptr_t
597 ld_do_activerelocs(Ofl_desc *ofl)
598 {
599 	Rel_desc	*arsp;
600 	Rel_cache	*rcp;
601 	Listnode	*lnp;
602 	uintptr_t	return_code = 1;
603 	Word		flags = ofl->ofl_flags;
604 
605 	if (ofl->ofl_actrels.head)
606 		DBG_CALL(Dbg_reloc_doact_title(ofl->ofl_lml));
607 
608 	/*
609 	 * Process active relocations.
610 	 */
611 	for (LIST_TRAVERSE(&ofl->ofl_actrels, lnp, rcp)) {
612 		/* LINTED */
613 		for (arsp = (Rel_desc *)(rcp + 1);
614 		    arsp < rcp->rc_free; arsp++) {
615 			uchar_t		*addr;
616 			Xword 		value;
617 			Sym_desc	*sdp;
618 			const char	*ifl_name;
619 			Xword		refaddr;
620 			int		moved = 0;
621 			Gotref		gref;
622 
623 			/*
624 			 * If the section this relocation is against has been
625 			 * discarded (-zignore), then discard (skip) the
626 			 * relocation itself.
627 			 */
628 			if ((arsp->rel_isdesc->is_flags & FLG_IS_DISCARD) &&
629 			    ((arsp->rel_flags &
630 			    (FLG_REL_GOT | FLG_REL_BSS |
631 			    FLG_REL_PLT | FLG_REL_NOINFO)) == 0)) {
632 				DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml,
633 				    M_MACH, arsp));
634 				continue;
635 			}
636 
637 			/*
638 			 * We deteremine what the 'got reference'
639 			 * model (if required) is at this point.  This
640 			 * needs to be done before tls_fixup() since
641 			 * it may 'transition' our instructions.
642 			 *
643 			 * The got table entries have already been assigned,
644 			 * and we bind to those initial entries.
645 			 */
646 			if (arsp->rel_flags & FLG_REL_DTLS)
647 				gref = GOT_REF_TLSGD;
648 			else if (arsp->rel_flags & FLG_REL_MTLS)
649 				gref = GOT_REF_TLSLD;
650 			else if (arsp->rel_flags & FLG_REL_STLS)
651 				gref = GOT_REF_TLSIE;
652 			else
653 				gref = GOT_REF_GENERIC;
654 
655 			/*
656 			 * Perform any required TLS fixups.
657 			 */
658 			if (arsp->rel_flags & FLG_REL_TLSFIX) {
659 				Fixupret	ret;
660 
661 				if ((ret = tls_fixups(ofl, arsp)) == FIX_ERROR)
662 					return (S_ERROR);
663 				if (ret == FIX_DONE)
664 					continue;
665 			}
666 
667 			/*
668 			 * If this is a relocation against a move table, or
669 			 * expanded move table, adjust the relocation entries.
670 			 */
671 			if (arsp->rel_move)
672 				ld_adj_movereloc(ofl, arsp);
673 
674 			sdp = arsp->rel_sym;
675 			refaddr = arsp->rel_roffset +
676 			    (Off)_elf_getxoff(arsp->rel_isdesc->is_indata);
677 
678 			if ((arsp->rel_flags & FLG_REL_CLVAL) ||
679 			    (arsp->rel_flags & FLG_REL_GOTCL))
680 				value = 0;
681 			else if (ELF_ST_TYPE(sdp->sd_sym->st_info) ==
682 			    STT_SECTION) {
683 				Sym_desc	*sym;
684 
685 				/*
686 				 * The value for a symbol pointing to a SECTION
687 				 * is based off of that sections position.
688 				 *
689 				 * The second argument of the ld_am_I_partial()
690 				 * is the value stored at the target address
691 				 * relocation is going to be applied.
692 				 */
693 				if ((sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
694 				    /* LINTED */
695 				    (sym = ld_am_I_partial(arsp, *(Xword *)
696 				    ((uchar_t *)
697 				    arsp->rel_isdesc->is_indata->d_buf +
698 				    arsp->rel_roffset)))) {
699 					/*
700 					 * If the symbol is moved,
701 					 * adjust the value
702 					 */
703 					value = sym->sd_sym->st_value;
704 					moved = 1;
705 				} else {
706 					value = _elf_getxoff(
707 					    sdp->sd_isc->is_indata);
708 					if (sdp->sd_isc->is_shdr->sh_flags &
709 					    SHF_ALLOC)
710 						value +=
711 						    sdp->sd_isc->is_osdesc->
712 						    os_shdr->sh_addr;
713 				}
714 				if (sdp->sd_isc->is_shdr->sh_flags & SHF_TLS)
715 					value -= ofl->ofl_tlsphdr->p_vaddr;
716 
717 			} else if (IS_SIZE(arsp->rel_rtype)) {
718 				/*
719 				 * Size relocations require the symbols size.
720 				 */
721 				value = sdp->sd_sym->st_size;
722 			} else {
723 				/*
724 				 * Else the value is the symbols value.
725 				 */
726 				value = sdp->sd_sym->st_value;
727 			}
728 
729 			/*
730 			 * Relocation against the GLOBAL_OFFSET_TABLE.
731 			 */
732 			if (arsp->rel_flags & FLG_REL_GOT)
733 				arsp->rel_osdesc = ofl->ofl_osgot;
734 
735 			/*
736 			 * If loadable and not producing a relocatable object
737 			 * add the sections virtual address to the reference
738 			 * address.
739 			 */
740 			if ((arsp->rel_flags & FLG_REL_LOAD) &&
741 			    ((flags & FLG_OF_RELOBJ) == 0))
742 				refaddr += arsp->rel_isdesc->is_osdesc->
743 				    os_shdr->sh_addr;
744 
745 			/*
746 			 * If this entry has a PLT assigned to it, it's
747 			 * value is actually the address of the PLT (and
748 			 * not the address of the function).
749 			 */
750 			if (IS_PLT(arsp->rel_rtype)) {
751 				if (sdp->sd_aux && sdp->sd_aux->sa_PLTndx)
752 					value = ld_calc_plt_addr(sdp, ofl);
753 			}
754 
755 			/*
756 			 * Add relocations addend to value.  Add extra
757 			 * relocation addend if needed.
758 			 *
759 			 * Note: for GOT relative relocations on amd64
760 			 *	 we discard the addend.  It was relevant
761 			 *	 to the reference - not to the data item
762 			 *	 being referenced (ie: that -4 thing).
763 			 */
764 			if ((arsp->rel_flags & FLG_REL_GOT) == 0)
765 				value += arsp->rel_raddend;
766 
767 			/*
768 			 * Determine whether the value needs further adjustment.
769 			 * Filter through the attributes of the relocation to
770 			 * determine what adjustment is required.  Note, many
771 			 * of the following cases are only applicable when a
772 			 * .got is present.  As a .got is not generated when a
773 			 * relocatable object is being built, any adjustments
774 			 * that require a .got need to be skipped.
775 			 */
776 			if ((arsp->rel_flags & FLG_REL_GOT) &&
777 			    ((flags & FLG_OF_RELOBJ) == 0)) {
778 				Xword		R1addr;
779 				uintptr_t	R2addr;
780 				Word		gotndx;
781 				Gotndx		*gnp;
782 
783 				/*
784 				 * Perform relocation against GOT table.  Since
785 				 * this doesn't fit exactly into a relocation
786 				 * we place the appropriate byte in the GOT
787 				 * directly
788 				 *
789 				 * Calculate offset into GOT at which to apply
790 				 * the relocation.
791 				 */
792 				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
793 				    ofl, arsp);
794 				assert(gnp);
795 
796 				if (arsp->rel_rtype == R_AMD64_DTPOFF64)
797 					gotndx = gnp->gn_gotndx + 1;
798 				else
799 					gotndx = gnp->gn_gotndx;
800 
801 				R1addr = (Xword)(gotndx * M_GOT_ENTSIZE);
802 
803 				/*
804 				 * Add the GOTs data's offset.
805 				 */
806 				R2addr = R1addr + (uintptr_t)
807 				    arsp->rel_osdesc->os_outdata->d_buf;
808 
809 				DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml,
810 				    ELF_DBG_LD, M_MACH, SHT_RELA,
811 				    arsp->rel_rtype, R1addr, value,
812 				    arsp->rel_sname, arsp->rel_osdesc));
813 
814 				/*
815 				 * And do it.
816 				 */
817 				if (ofl->ofl_flags1 & FLG_OF1_ENCDIFF)
818 					*(Xword *)R2addr =
819 					    ld_byteswap_Xword(value);
820 				else
821 					*(Xword *)R2addr = value;
822 				continue;
823 
824 			} else if (IS_GOT_BASED(arsp->rel_rtype) &&
825 			    ((flags & FLG_OF_RELOBJ) == 0)) {
826 				value -= ofl->ofl_osgot->os_shdr->sh_addr;
827 
828 			} else if (IS_GOTPCREL(arsp->rel_rtype) &&
829 			    ((flags & FLG_OF_RELOBJ) == 0)) {
830 				Gotndx *gnp;
831 
832 				/*
833 				 * Calculation:
834 				 *	G + GOT + A - P
835 				 */
836 				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
837 				    gref, ofl, arsp);
838 				assert(gnp);
839 				value = (Xword)(ofl->ofl_osgot->os_shdr->
840 				    sh_addr) + ((Xword)gnp->gn_gotndx *
841 				    M_GOT_ENTSIZE) + arsp->rel_raddend -
842 				    refaddr;
843 
844 			} else if (IS_GOT_PC(arsp->rel_rtype) &&
845 			    ((flags & FLG_OF_RELOBJ) == 0)) {
846 				value = (Xword)(ofl->ofl_osgot->os_shdr->
847 				    sh_addr) - refaddr + arsp->rel_raddend;
848 
849 			} else if ((IS_PC_RELATIVE(arsp->rel_rtype)) &&
850 			    (((flags & FLG_OF_RELOBJ) == 0) ||
851 			    (arsp->rel_osdesc == sdp->sd_isc->is_osdesc))) {
852 				value -= refaddr;
853 
854 			} else if (IS_TLS_INS(arsp->rel_rtype) &&
855 			    IS_GOT_RELATIVE(arsp->rel_rtype) &&
856 			    ((flags & FLG_OF_RELOBJ) == 0)) {
857 				Gotndx	*gnp;
858 
859 				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
860 				    ofl, arsp);
861 				assert(gnp);
862 				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;
863 
864 			} else if (IS_GOT_RELATIVE(arsp->rel_rtype) &&
865 			    ((flags & FLG_OF_RELOBJ) == 0)) {
866 				Gotndx *gnp;
867 
868 				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
869 				    gref, ofl, arsp);
870 				assert(gnp);
871 				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;
872 
873 			} else if ((arsp->rel_flags & FLG_REL_STLS) &&
874 			    ((flags & FLG_OF_RELOBJ) == 0)) {
875 				Xword	tlsstatsize;
876 
877 				/*
878 				 * This is the LE TLS reference model.  Static
879 				 * offset is hard-coded.
880 				 */
881 				tlsstatsize =
882 				    S_ROUND(ofl->ofl_tlsphdr->p_memsz,
883 				    M_TLSSTATALIGN);
884 				value = tlsstatsize - value;
885 
886 				/*
887 				 * Since this code is fixed up, it assumes a
888 				 * negative offset that can be added to the
889 				 * thread pointer.
890 				 */
891 				if (arsp->rel_rtype == R_AMD64_TPOFF32)
892 					value = -value;
893 			}
894 
895 			if (arsp->rel_isdesc->is_file)
896 				ifl_name = arsp->rel_isdesc->is_file->ifl_name;
897 			else
898 				ifl_name = MSG_INTL(MSG_STR_NULL);
899 
900 			/*
901 			 * Make sure we have data to relocate.  Compiler and
902 			 * assembler developers have been known to generate
903 			 * relocations against invalid sections (normally .bss),
904 			 * so for their benefit give them sufficient information
905 			 * to help analyze the problem.  End users should never
906 			 * see this.
907 			 */
908 			if (arsp->rel_isdesc->is_indata->d_buf == 0) {
909 				Conv_inv_buf_t inv_buf;
910 
911 				eprintf(ofl->ofl_lml, ERR_FATAL,
912 				    MSG_INTL(MSG_REL_EMPTYSEC),
913 				    conv_reloc_amd64_type(arsp->rel_rtype,
914 				    0, &inv_buf), ifl_name,
915 				    demangle(arsp->rel_sname),
916 				    arsp->rel_isdesc->is_name);
917 				return (S_ERROR);
918 			}
919 
920 			/*
921 			 * Get the address of the data item we need to modify.
922 			 */
923 			addr = (uchar_t *)((uintptr_t)arsp->rel_roffset +
924 			    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->
925 			    is_indata));
926 
927 			DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml, ELF_DBG_LD,
928 			    M_MACH, SHT_RELA, arsp->rel_rtype, EC_NATPTR(addr),
929 			    value, arsp->rel_sname, arsp->rel_osdesc));
930 			addr += (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf;
931 
932 			if ((((uintptr_t)addr - (uintptr_t)ofl->ofl_nehdr) >
933 			    ofl->ofl_size) || (arsp->rel_roffset >
934 			    arsp->rel_osdesc->os_shdr->sh_size)) {
935 				int		class;
936 				Conv_inv_buf_t inv_buf;
937 
938 				if (((uintptr_t)addr -
939 				    (uintptr_t)ofl->ofl_nehdr) > ofl->ofl_size)
940 					class = ERR_FATAL;
941 				else
942 					class = ERR_WARNING;
943 
944 				eprintf(ofl->ofl_lml, class,
945 				    MSG_INTL(MSG_REL_INVALOFFSET),
946 				    conv_reloc_amd64_type(arsp->rel_rtype,
947 				    0, &inv_buf), ifl_name,
948 				    arsp->rel_isdesc->is_name,
949 				    demangle(arsp->rel_sname),
950 				    EC_ADDR((uintptr_t)addr -
951 				    (uintptr_t)ofl->ofl_nehdr));
952 
953 				if (class == ERR_FATAL) {
954 					return_code = S_ERROR;
955 					continue;
956 				}
957 			}
958 
959 			/*
960 			 * The relocation is additive.  Ignore the previous
961 			 * symbol value if this local partial symbol is
962 			 * expanded.
963 			 */
964 			if (moved)
965 				value -= *addr;
966 
967 			/*
968 			 * If '-z noreloc' is specified - skip the do_reloc_ld
969 			 * stage.
970 			 */
971 			if (OFL_DO_RELOC(ofl)) {
972 				/*
973 				 * If this is a PROGBITS section and the
974 				 * running linker has a different byte order
975 				 * than the target host, tell do_reloc_ld()
976 				 * to swap bytes.
977 				 */
978 				if (do_reloc_ld((uchar_t)arsp->rel_rtype,
979 				    addr, &value, arsp->rel_sname, ifl_name,
980 				    OFL_SWAP_RELOC_DATA(ofl, arsp),
981 				    ofl->ofl_lml) == 0)
982 					return_code = S_ERROR;
983 			}
984 		}
985 	}
986 	return (return_code);
987 }
988 
989 uintptr_t
990 ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
991 {
992 	Rel_desc	*orsp;
993 	Rel_cache	*rcp;
994 	Sym_desc	*sdp = rsp->rel_sym;
995 
996 	/*
997 	 * Static executables *do not* want any relocations against them.
998 	 * Since our engine still creates relocations against a WEAK UNDEFINED
999 	 * symbol in a static executable, it's best to disable them here
1000 	 * instead of through out the relocation code.
1001 	 */
1002 	if ((ofl->ofl_flags & (FLG_OF_STATIC | FLG_OF_EXEC)) ==
1003 	    (FLG_OF_STATIC | FLG_OF_EXEC))
1004 		return (1);
1005 
1006 	/*
1007 	 * If no relocation cache structures are available allocate
1008 	 * a new one and link it into the cache list.
1009 	 */
1010 	if ((ofl->ofl_outrels.tail == 0) ||
1011 	    ((rcp = (Rel_cache *)ofl->ofl_outrels.tail->data) == 0) ||
1012 	    ((orsp = rcp->rc_free) == rcp->rc_end)) {
1013 		static size_t	nextsize = 0;
1014 		size_t		size;
1015 
1016 		/*
1017 		 * Output relocation numbers can vary considerably between
1018 		 * building executables or shared objects (pic vs. non-pic),
1019 		 * etc.  But, they typically aren't very large, so for these
1020 		 * objects use a standard bucket size.  For building relocatable
1021 		 * objects, typically there will be an output relocation for
1022 		 * every input relocation.
1023 		 */
1024 		if (nextsize == 0) {
1025 			if (ofl->ofl_flags & FLG_OF_RELOBJ) {
1026 				if ((size = ofl->ofl_relocincnt) == 0)
1027 					size = REL_LOIDESCNO;
1028 				if (size > REL_HOIDESCNO)
1029 					nextsize = REL_HOIDESCNO;
1030 				else
1031 					nextsize = REL_LOIDESCNO;
1032 			} else
1033 				nextsize = size = REL_HOIDESCNO;
1034 		} else
1035 			size = nextsize;
1036 
1037 		size = size * sizeof (Rel_desc);
1038 
1039 		if (((rcp = libld_malloc(sizeof (Rel_cache) + size)) == 0) ||
1040 		    (list_appendc(&ofl->ofl_outrels, rcp) == 0))
1041 			return (S_ERROR);
1042 
1043 		/* LINTED */
1044 		rcp->rc_free = orsp = (Rel_desc *)(rcp + 1);
1045 		/* LINTED */
1046 		rcp->rc_end = (Rel_desc *)((char *)rcp->rc_free + size);
1047 	}
1048 
1049 	/*
1050 	 * If we are adding a output relocation against a section
1051 	 * symbol (non-RELATIVE) then mark that section.  These sections
1052 	 * will be added to the .dynsym symbol table.
1053 	 */
1054 	if (sdp && (rsp->rel_rtype != M_R_RELATIVE) &&
1055 	    ((flags & FLG_REL_SCNNDX) ||
1056 	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION))) {
1057 
1058 		/*
1059 		 * If this is a COMMON symbol - no output section
1060 		 * exists yet - (it's created as part of sym_validate()).
1061 		 * So - we mark here that when it's created it should
1062 		 * be tagged with the FLG_OS_OUTREL flag.
1063 		 */
1064 		if ((sdp->sd_flags & FLG_SY_SPECSEC) &&
1065 		    (sdp->sd_sym->st_shndx == SHN_COMMON)) {
1066 			if (ELF_ST_TYPE(sdp->sd_sym->st_info) != STT_TLS)
1067 				ofl->ofl_flags1 |= FLG_OF1_BSSOREL;
1068 			else
1069 				ofl->ofl_flags1 |= FLG_OF1_TLSOREL;
1070 		} else {
1071 			Os_desc	*osp = sdp->sd_isc->is_osdesc;
1072 
1073 			if (osp && ((osp->os_flags & FLG_OS_OUTREL) == 0)) {
1074 				ofl->ofl_dynshdrcnt++;
1075 				osp->os_flags |= FLG_OS_OUTREL;
1076 			}
1077 		}
1078 	}
1079 
1080 	*orsp = *rsp;
1081 	orsp->rel_flags |= flags;
1082 
1083 	rcp->rc_free++;
1084 	ofl->ofl_outrelscnt++;
1085 
1086 	if (flags & FLG_REL_GOT)
1087 		ofl->ofl_relocgotsz += (Xword)sizeof (Rela);
1088 	else if (flags & FLG_REL_PLT)
1089 		ofl->ofl_relocpltsz += (Xword)sizeof (Rela);
1090 	else if (flags & FLG_REL_BSS)
1091 		ofl->ofl_relocbsssz += (Xword)sizeof (Rela);
1092 	else if (flags & FLG_REL_NOINFO)
1093 		ofl->ofl_relocrelsz += (Xword)sizeof (Rela);
1094 	else
1095 		orsp->rel_osdesc->os_szoutrels += (Xword)sizeof (Rela);
1096 
1097 	if (orsp->rel_rtype == M_R_RELATIVE)
1098 		ofl->ofl_relocrelcnt++;
1099 
1100 	/*
1101 	 * We don't perform sorting on PLT relocations because
1102 	 * they have already been assigned a PLT index and if we
1103 	 * were to sort them we would have to re-assign the plt indexes.
1104 	 */
1105 	if (!(flags & FLG_REL_PLT))
1106 		ofl->ofl_reloccnt++;
1107 
1108 	/*
1109 	 * Insure a GLOBAL_OFFSET_TABLE is generated if required.
1110 	 */
1111 	if (IS_GOT_REQUIRED(orsp->rel_rtype))
1112 		ofl->ofl_flags |= FLG_OF_BLDGOT;
1113 
1114 	/*
1115 	 * Identify and possibly warn of a displacement relocation.
1116 	 */
1117 	if (orsp->rel_flags & FLG_REL_DISP) {
1118 		ofl->ofl_dtflags_1 |= DF_1_DISPRELPND;
1119 
1120 		if (ofl->ofl_flags & FLG_OF_VERBOSE)
1121 			ld_disp_errmsg(MSG_INTL(MSG_REL_DISPREL4), orsp, ofl);
1122 	}
1123 	DBG_CALL(Dbg_reloc_ors_entry(ofl->ofl_lml, ELF_DBG_LD, SHT_RELA,
1124 	    M_MACH, orsp));
1125 	return (1);
1126 }
1127 
1128 /*
1129  * Stub routine since register symbols are not supported on amd64.
1130  */
1131 /* ARGSUSED */
1132 uintptr_t
1133 ld_reloc_register(Rel_desc * rsp, Is_desc * isp, Ofl_desc * ofl)
1134 {
1135 	eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_REL_NOREG));
1136 	return (S_ERROR);
1137 }
1138 
1139 /*
1140  * process relocation for a LOCAL symbol
1141  */
1142 uintptr_t
1143 ld_reloc_local(Rel_desc * rsp, Ofl_desc * ofl)
1144 {
1145 	Word		flags = ofl->ofl_flags;
1146 	Sym_desc	*sdp = rsp->rel_sym;
1147 	Word		shndx = sdp->sd_sym->st_shndx;
1148 	Word		ortype = rsp->rel_rtype;
1149 
1150 	/*
1151 	 * if ((shared object) and (not pc relative relocation) and
1152 	 *    (not against ABS symbol))
1153 	 * then
1154 	 *	build R_AMD64_RELATIVE
1155 	 * fi
1156 	 */
1157 	if ((flags & FLG_OF_SHAROBJ) && (rsp->rel_flags & FLG_REL_LOAD) &&
1158 	    !(IS_PC_RELATIVE(rsp->rel_rtype)) && !(IS_SIZE(rsp->rel_rtype)) &&
1159 	    !(IS_GOT_BASED(rsp->rel_rtype)) &&
1160 	    !(rsp->rel_isdesc != NULL &&
1161 	    (rsp->rel_isdesc->is_shdr->sh_type == SHT_SUNW_dof)) &&
1162 	    (((sdp->sd_flags & FLG_SY_SPECSEC) == 0) ||
1163 	    (shndx != SHN_ABS) || (sdp->sd_aux && sdp->sd_aux->sa_symspec))) {
1164 
1165 		/*
1166 		 * R_AMD64_RELATIVE updates a 64bit address, if this
1167 		 * relocation isn't a 64bit binding then we can not
1168 		 * simplify it to a RELATIVE relocation.
1169 		 */
1170 		if (reloc_table[ortype].re_fsize != sizeof (Addr)) {
1171 			return (ld_add_outrel(NULL, rsp, ofl));
1172 		}
1173 
1174 		rsp->rel_rtype = R_AMD64_RELATIVE;
1175 		if (ld_add_outrel(FLG_REL_ADVAL, rsp, ofl) == S_ERROR)
1176 			return (S_ERROR);
1177 		rsp->rel_rtype = ortype;
1178 		return (1);
1179 	}
1180 
1181 	/*
1182 	 * If the relocation is against a 'non-allocatable' section
1183 	 * and we can not resolve it now - then give a warning
1184 	 * message.
1185 	 *
1186 	 * We can not resolve the symbol if either:
1187 	 *	a) it's undefined
1188 	 *	b) it's defined in a shared library and a
1189 	 *	   COPY relocation hasn't moved it to the executable
1190 	 *
1191 	 * Note: because we process all of the relocations against the
1192 	 *	text segment before any others - we know whether
1193 	 *	or not a copy relocation will be generated before
1194 	 *	we get here (see reloc_init()->reloc_segments()).
1195 	 */
1196 	if (!(rsp->rel_flags & FLG_REL_LOAD) &&
1197 	    ((shndx == SHN_UNDEF) ||
1198 	    ((sdp->sd_ref == REF_DYN_NEED) &&
1199 	    ((sdp->sd_flags & FLG_SY_MVTOCOMM) == 0)))) {
1200 		Conv_inv_buf_t inv_buf;
1201 
1202 		/*
1203 		 * If the relocation is against a SHT_SUNW_ANNOTATE
1204 		 * section - then silently ignore that the relocation
1205 		 * can not be resolved.
1206 		 */
1207 		if (rsp->rel_osdesc &&
1208 		    (rsp->rel_osdesc->os_shdr->sh_type == SHT_SUNW_ANNOTATE))
1209 			return (0);
1210 		(void) eprintf(ofl->ofl_lml, ERR_WARNING,
1211 		    MSG_INTL(MSG_REL_EXTERNSYM),
1212 		    conv_reloc_amd64_type(rsp->rel_rtype, 0, &inv_buf),
1213 		    rsp->rel_isdesc->is_file->ifl_name,
1214 		    demangle(rsp->rel_sname), rsp->rel_osdesc->os_name);
1215 		return (1);
1216 	}
1217 
1218 	/*
1219 	 * Perform relocation.
1220 	 */
1221 	return (ld_add_actrel(NULL, rsp, ofl));
1222 }
1223 
1224 
1225 uintptr_t
1226 /* ARGSUSED */
1227 ld_reloc_GOTOP(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
1228 {
1229 	/*
1230 	 * Stub routine for common code compatibility, we shouldn't
1231 	 * actually get here on amd64.
1232 	 */
1233 	assert(0);
1234 	return (S_ERROR);
1235 }
1236 
1237 uintptr_t
1238 ld_reloc_TLS(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
1239 {
1240 	Word		rtype = rsp->rel_rtype;
1241 	Sym_desc	*sdp = rsp->rel_sym;
1242 	Word		flags = ofl->ofl_flags;
1243 	Gotndx		*gnp;
1244 
1245 	/*
1246 	 * If we're building an executable - use either the IE or LE access
1247 	 * model.  If we're building a shared object process any IE model.
1248 	 */
1249 	if ((flags & FLG_OF_EXEC) || (IS_TLS_IE(rtype))) {
1250 		/*
1251 		 * Set the DF_STATIC_TLS flag.
1252 		 */
1253 		ofl->ofl_dtflags |= DF_STATIC_TLS;
1254 
1255 		if (!local || ((flags & FLG_OF_EXEC) == 0)) {
1256 			/*
1257 			 * Assign a GOT entry for static TLS references.
1258 			 */
1259 			if ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
1260 			    GOT_REF_TLSIE, ofl, rsp)) == 0) {
1261 
1262 				if (ld_assign_got_TLS(local, rsp, ofl, sdp,
1263 				    gnp, GOT_REF_TLSIE, FLG_REL_STLS,
1264 				    rtype, R_AMD64_TPOFF64, 0) == S_ERROR)
1265 					return (S_ERROR);
1266 			}
1267 
1268 			/*
1269 			 * IE access model.
1270 			 */
1271 			if (IS_TLS_IE(rtype))
1272 				return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));
1273 
1274 			/*
1275 			 * Fixups are required for other executable models.
1276 			 */
1277 			return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
1278 			    rsp, ofl));
1279 		}
1280 
1281 		/*
1282 		 * LE access model.
1283 		 */
1284 		if (IS_TLS_LE(rtype))
1285 			return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));
1286 
1287 		return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
1288 		    rsp, ofl));
1289 	}
1290 
1291 	/*
1292 	 * Building a shared object.
1293 	 *
1294 	 * Assign a GOT entry for a dynamic TLS reference.
1295 	 */
1296 	if (IS_TLS_LD(rtype) && ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
1297 	    GOT_REF_TLSLD, ofl, rsp)) == 0)) {
1298 
1299 		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSLD,
1300 		    FLG_REL_MTLS, rtype, R_AMD64_DTPMOD64, 0) == S_ERROR)
1301 			return (S_ERROR);
1302 
1303 	} else if (IS_TLS_GD(rtype) &&
1304 	    ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), GOT_REF_TLSGD,
1305 	    ofl, rsp)) == 0)) {
1306 
1307 		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSGD,
1308 		    FLG_REL_DTLS, rtype, R_AMD64_DTPMOD64,
1309 		    R_AMD64_DTPOFF64) == S_ERROR)
1310 			return (S_ERROR);
1311 	}
1312 
1313 	if (IS_TLS_LD(rtype))
1314 		return (ld_add_actrel(FLG_REL_MTLS, rsp, ofl));
1315 
1316 	return (ld_add_actrel(FLG_REL_DTLS, rsp, ofl));
1317 }
1318 
1319 /* ARGSUSED3 */
1320 Gotndx *
1321 ld_find_gotndx(List * lst, Gotref gref, Ofl_desc * ofl, Rel_desc * rdesc)
1322 {
1323 	Listnode *	lnp;
1324 	Gotndx *	gnp;
1325 
1326 	assert(rdesc != 0);
1327 
1328 	if ((gref == GOT_REF_TLSLD) && ofl->ofl_tlsldgotndx)
1329 		return (ofl->ofl_tlsldgotndx);
1330 
1331 	for (LIST_TRAVERSE(lst, lnp, gnp)) {
1332 		if ((rdesc->rel_raddend == gnp->gn_addend) &&
1333 		    (gnp->gn_gotref == gref)) {
1334 			return (gnp);
1335 		}
1336 	}
1337 	return ((Gotndx *)0);
1338 }
1339 
1340 Xword
1341 ld_calc_got_offset(Rel_desc * rdesc, Ofl_desc * ofl)
1342 {
1343 	Os_desc		*osp = ofl->ofl_osgot;
1344 	Sym_desc	*sdp = rdesc->rel_sym;
1345 	Xword		gotndx;
1346 	Gotref		gref;
1347 	Gotndx		*gnp;
1348 
1349 	if (rdesc->rel_flags & FLG_REL_DTLS)
1350 		gref = GOT_REF_TLSGD;
1351 	else if (rdesc->rel_flags & FLG_REL_MTLS)
1352 		gref = GOT_REF_TLSLD;
1353 	else if (rdesc->rel_flags & FLG_REL_STLS)
1354 		gref = GOT_REF_TLSIE;
1355 	else
1356 		gref = GOT_REF_GENERIC;
1357 
1358 	gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref, ofl, rdesc);
1359 	assert(gnp);
1360 
1361 	gotndx = (Xword)gnp->gn_gotndx;
1362 
1363 	if ((rdesc->rel_flags & FLG_REL_DTLS) &&
1364 	    (rdesc->rel_rtype == R_AMD64_DTPOFF64))
1365 		gotndx++;
1366 
1367 	return ((Xword)(osp->os_shdr->sh_addr + (gotndx * M_GOT_ENTSIZE)));
1368 }
1369 
1370 
1371 /* ARGSUSED5 */
1372 uintptr_t
1373 ld_assign_got_ndx(List * lst, Gotndx * pgnp, Gotref gref, Ofl_desc * ofl,
1374     Rel_desc * rsp, Sym_desc * sdp)
1375 {
1376 	Xword		raddend;
1377 	Gotndx		*gnp, *_gnp;
1378 	Listnode	*lnp, *plnp;
1379 	uint_t		gotents;
1380 
1381 	raddend = rsp->rel_raddend;
1382 	if (pgnp && (pgnp->gn_addend == raddend) &&
1383 	    (pgnp->gn_gotref == gref))
1384 		return (1);
1385 
1386 	if ((gref == GOT_REF_TLSGD) || (gref == GOT_REF_TLSLD))
1387 		gotents = 2;
1388 	else
1389 		gotents = 1;
1390 
1391 	plnp = 0;
1392 	for (LIST_TRAVERSE(lst, lnp, _gnp)) {
1393 		if (_gnp->gn_addend > raddend)
1394 			break;
1395 		plnp = lnp;
1396 	}
1397 
1398 	/*
1399 	 * Allocate a new entry.
1400 	 */
1401 	if ((gnp = libld_calloc(sizeof (Gotndx), 1)) == 0)
1402 		return (S_ERROR);
1403 	gnp->gn_addend = raddend;
1404 	gnp->gn_gotndx = ofl->ofl_gotcnt;
1405 	gnp->gn_gotref = gref;
1406 
1407 	ofl->ofl_gotcnt += gotents;
1408 
1409 	if (gref == GOT_REF_TLSLD) {
1410 		ofl->ofl_tlsldgotndx = gnp;
1411 		return (1);
1412 	}
1413 
1414 	if (plnp == 0) {
1415 		/*
1416 		 * Insert at head of list
1417 		 */
1418 		if (list_prependc(lst, (void *)gnp) == 0)
1419 			return (S_ERROR);
1420 	} else if (_gnp->gn_addend > raddend) {
1421 		/*
1422 		 * Insert in middle of lest
1423 		 */
1424 		if (list_insertc(lst, (void *)gnp, plnp) == 0)
1425 			return (S_ERROR);
1426 	} else {
1427 		/*
1428 		 * Append to tail of list
1429 		 */
1430 		if (list_appendc(lst, (void *)gnp) == 0)
1431 			return (S_ERROR);
1432 	}
1433 	return (1);
1434 }
1435 
1436 void
1437 ld_assign_plt_ndx(Sym_desc * sdp, Ofl_desc *ofl)
1438 {
1439 	sdp->sd_aux->sa_PLTndx = 1 + ofl->ofl_pltcnt++;
1440 	sdp->sd_aux->sa_PLTGOTndx = ofl->ofl_gotcnt++;
1441 	ofl->ofl_flags |= FLG_OF_BLDGOT;
1442 }
1443 
1444 static uchar_t plt0_template[M_PLT_ENTSIZE] = {
1445 /* 0x00 PUSHQ GOT+8(%rip) */	0xff, 0x35, 0x00, 0x00, 0x00, 0x00,
1446 /* 0x06 JMP   *GOT+16(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
1447 /* 0x0c NOP */			0x90,
1448 /* 0x0d NOP */			0x90,
1449 /* 0x0e NOP */			0x90,
1450 /* 0x0f NOP */			0x90
1451 };
1452 
1453 /*
1454  * Initializes .got[0] with the _DYNAMIC symbol value.
1455  */
1456 uintptr_t
1457 ld_fillin_gotplt(Ofl_desc *ofl)
1458 {
1459 	if (ofl->ofl_osgot) {
1460 		Sym_desc	*sdp;
1461 
1462 		if ((sdp = ld_sym_find(MSG_ORIG(MSG_SYM_DYNAMIC_U),
1463 		    SYM_NOHASH, 0, ofl)) != NULL) {
1464 			uchar_t	*genptr;
1465 
1466 			genptr = ((uchar_t *)ofl->ofl_osgot->os_outdata->d_buf +
1467 			    (M_GOT_XDYNAMIC * M_GOT_ENTSIZE));
1468 			/* LINTED */
1469 			*(Xword *)genptr = sdp->sd_sym->st_value;
1470 		}
1471 	}
1472 
1473 	/*
1474 	 * Fill in the reserved slot in the procedure linkage table the first
1475 	 * entry is:
1476 	 *	0x00 PUSHQ	GOT+8(%rip)	    # GOT[1]
1477 	 *	0x06 JMP	*GOT+16(%rip)	    # GOT[2]
1478 	 *	0x0c NOP
1479 	 *	0x0d NOP
1480 	 *	0x0e NOP
1481 	 *	0x0f NOP
1482 	 */
1483 	if ((ofl->ofl_flags & FLG_OF_DYNAMIC) && ofl->ofl_osplt) {
1484 		uchar_t	*pltent;
1485 		Xword	val1;
1486 		int	bswap;
1487 
1488 		pltent = (uchar_t *)ofl->ofl_osplt->os_outdata->d_buf;
1489 		bcopy(plt0_template, pltent, sizeof (plt0_template));
1490 
1491 		/*
1492 		 * If '-z noreloc' is specified - skip the do_reloc_ld
1493 		 * stage.
1494 		 */
1495 		if (!OFL_DO_RELOC(ofl))
1496 			return (1);
1497 
1498 		/*
1499 		 * If the running linker has a different byte order than
1500 		 * the target host, tell do_reloc_ld() to swap bytes.
1501 		 *
1502 		 * We know the GOT is PROGBITS --- we don't have
1503 		 * to check.
1504 		 */
1505 		bswap = (ofl->ofl_flags1 & FLG_OF1_ENCDIFF) != 0;
1506 
1507 		/*
1508 		 * filin:
1509 		 *	PUSHQ GOT + 8(%rip)
1510 		 *
1511 		 * Note: 0x06 below represents the offset to the
1512 		 *	 next instruction - which is what %rip will
1513 		 *	 be pointing at.
1514 		 */
1515 		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
1516 		    (M_GOT_XLINKMAP * M_GOT_ENTSIZE) -
1517 		    ofl->ofl_osplt->os_shdr->sh_addr - 0x06;
1518 
1519 		if (do_reloc_ld(R_AMD64_GOTPCREL, &pltent[0x02],
1520 		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
1521 		    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
1522 			eprintf(ofl->ofl_lml, ERR_FATAL,
1523 			    MSG_INTL(MSG_PLT_PLT0FAIL));
1524 			return (S_ERROR);
1525 		}
1526 
1527 		/*
1528 		 * filin:
1529 		 *  JMP	*GOT+16(%rip)
1530 		 */
1531 		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
1532 		    (M_GOT_XRTLD * M_GOT_ENTSIZE) -
1533 		    ofl->ofl_osplt->os_shdr->sh_addr - 0x0c;
1534 
1535 		if (do_reloc_ld(R_AMD64_GOTPCREL, &pltent[0x08],
1536 		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
1537 		    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
1538 			eprintf(ofl->ofl_lml, ERR_FATAL,
1539 			    MSG_INTL(MSG_PLT_PLT0FAIL));
1540 			return (S_ERROR);
1541 		}
1542 	}
1543 
1544 	return (1);
1545 }
1546