xref: /titanic_50/usr/src/cmd/sgs/libld/common/machrel.amd.c (revision ec530482c1ef4dca30addfa5aad4ee0ed6588b9c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /* Get the x86 version of the relocation engine */
28 #define	DO_RELOC_LIBLD_X86
29 
30 #include	<string.h>
31 #include	<stdio.h>
32 #include	<strings.h>
33 #include	<sys/elf_amd64.h>
34 #include	<debug.h>
35 #include	<reloc.h>
36 #include	<i386/machdep_x86.h>
37 #include	"msg.h"
38 #include	"_libld.h"
39 
40 /*
41  * Search the GOT index list for a GOT entry with a matching reference and the
42  * proper addend.
43  */
44 static Gotndx *
45 ld_find_got_ndx(Alist *alp, Gotref gref, Ofl_desc *ofl, Rel_desc *rdesc)
46 {
47 	Aliste	idx;
48 	Gotndx	*gnp;
49 
50 	assert(rdesc != 0);
51 
52 	if ((gref == GOT_REF_TLSLD) && ofl->ofl_tlsldgotndx)
53 		return (ofl->ofl_tlsldgotndx);
54 
55 	for (ALIST_TRAVERSE(alp, idx, gnp)) {
56 		if ((rdesc->rel_raddend == gnp->gn_addend) &&
57 		    (gnp->gn_gotref == gref)) {
58 			return (gnp);
59 		}
60 	}
61 	return (NULL);
62 }
63 
64 static Xword
65 ld_calc_got_offset(Rel_desc *rdesc, Ofl_desc *ofl)
66 {
67 	Os_desc		*osp = ofl->ofl_osgot;
68 	Sym_desc	*sdp = rdesc->rel_sym;
69 	Xword		gotndx;
70 	Gotref		gref;
71 	Gotndx		*gnp;
72 
73 	if (rdesc->rel_flags & FLG_REL_DTLS)
74 		gref = GOT_REF_TLSGD;
75 	else if (rdesc->rel_flags & FLG_REL_MTLS)
76 		gref = GOT_REF_TLSLD;
77 	else if (rdesc->rel_flags & FLG_REL_STLS)
78 		gref = GOT_REF_TLSIE;
79 	else
80 		gref = GOT_REF_GENERIC;
81 
82 	gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref, ofl, rdesc);
83 	assert(gnp);
84 
85 	gotndx = (Xword)gnp->gn_gotndx;
86 
87 	if ((rdesc->rel_flags & FLG_REL_DTLS) &&
88 	    (rdesc->rel_rtype == R_AMD64_DTPOFF64))
89 		gotndx++;
90 
91 	return ((Xword)(osp->os_shdr->sh_addr + (gotndx * M_GOT_ENTSIZE)));
92 }
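
/*
 * Illustrative example (addresses assumed, not taken from a real link-edit):
 * with the .got at sh_addr 0x401000 and M_GOT_ENTSIZE of 8, a symbol whose
 * Gotndx records gn_gotndx 3 yields 0x401000 + (3 * 8) = 0x401018.  An
 * R_AMD64_DTPOFF64 request bumps the index by one, selecting the second of
 * the two slots reserved for the GD module-ID/offset pair, i.e. 0x401020.
 */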
93 
94 static Word
95 ld_init_rel(Rel_desc *reld, void *reloc)
96 {
97 	Rela	*rel = (Rela *)reloc;
98 
99 	/* LINTED */
100 	reld->rel_rtype = (Word)ELF_R_TYPE(rel->r_info, M_MACH);
101 	reld->rel_roffset = rel->r_offset;
102 	reld->rel_raddend = rel->r_addend;
103 	reld->rel_typedata = 0;
104 
105 	reld->rel_flags |= FLG_REL_RELA;
106 
107 	return ((Word)ELF_R_SYM(rel->r_info));
108 }
109 
110 static void
111 ld_mach_eflags(Ehdr *ehdr, Ofl_desc *ofl)
112 {
113 	ofl->ofl_dehdr->e_flags |= ehdr->e_flags;
114 }
115 
116 static void
117 ld_mach_make_dynamic(Ofl_desc *ofl, size_t *cnt)
118 {
119 	if (!(ofl->ofl_flags & FLG_OF_RELOBJ)) {
120 		/*
121 		 * Create this entry if we are going to create a PLT table.
122 		 */
123 		if (ofl->ofl_pltcnt)
124 			(*cnt)++;		/* DT_PLTGOT */
125 	}
126 }
127 
128 static void
129 ld_mach_update_odynamic(Ofl_desc *ofl, Dyn **dyn)
130 {
131 	if (((ofl->ofl_flags & FLG_OF_RELOBJ) == 0) && ofl->ofl_pltcnt) {
132 		(*dyn)->d_tag = DT_PLTGOT;
133 		if (ofl->ofl_osgot)
134 			(*dyn)->d_un.d_ptr = ofl->ofl_osgot->os_shdr->sh_addr;
135 		else
136 			(*dyn)->d_un.d_ptr = 0;
137 		(*dyn)++;
138 	}
139 }
140 
141 static Xword
142 ld_calc_plt_addr(Sym_desc *sdp, Ofl_desc *ofl)
143 {
144 	Xword	value;
145 
146 	value = (Xword)(ofl->ofl_osplt->os_shdr->sh_addr) +
147 	    M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) * M_PLT_ENTSIZE);
148 	return (value);
149 }
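
/*
 * Worked example (section address assumed): with the .plt at sh_addr
 * 0x400ff0, and taking M_PLT_RESERVSZ and M_PLT_ENTSIZE to both be 0x10
 * (the size of the 16-byte templates below), the symbol assigned
 * sa_PLTndx 1 resolves to 0x400ff0 + 0x10 + (0 * 0x10) = 0x401000, the
 * first slot after the reserved PLT0 entry.
 */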
150 
151 /*
152  * Build a single PLT entry - the code is:
153  *	JMP	*name1@GOTPCREL(%rip)
154  *	PUSHQ	$index
155  *	JMP	.PLT0
156  */
157 static uchar_t pltn_entry[M_PLT_ENTSIZE] = {
158 /* 0x00 jmpq *name1@GOTPCREL(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
159 /* 0x06 pushq $index */			0x68, 0x00, 0x00, 0x00, 0x00,
160 /* 0x0b jmpq  .plt0(%rip) */		0xe9, 0x00, 0x00, 0x00, 0x00
161 /* 0x10 */
162 };
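
/*
 * The three zeroed immediate fields above are patched by plt_entry() below:
 * the four bytes at offset 0x02 receive an R_AMD64_GOTPCREL value, the four
 * bytes at 0x07 the PLT index (R_AMD64_32), and the four bytes at 0x0c the
 * PC-relative displacement back to .PLT0 (R_AMD64_PC32).
 */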
163 
164 static uintptr_t
165 plt_entry(Ofl_desc * ofl, Sym_desc * sdp)
166 {
167 	uchar_t		*plt0, *pltent, *gotent;
168 	Sword		plt_off;
169 	Word		got_off;
170 	Xword		val1;
171 	int		bswap = (ofl->ofl_flags1 & FLG_OF1_ENCDIFF) != 0;
172 
173 	got_off = sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
174 	plt_off = M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) *
175 	    M_PLT_ENTSIZE);
176 	plt0 = (uchar_t *)(ofl->ofl_osplt->os_outdata->d_buf);
177 	pltent = plt0 + plt_off;
178 	gotent = (uchar_t *)(ofl->ofl_osgot->os_outdata->d_buf) + got_off;
179 
180 	bcopy(pltn_entry, pltent, sizeof (pltn_entry));
181 	/*
182 	 * Fill in the got entry with the address of the next instruction.
183 	 */
184 	/* LINTED */
185 	*(Word *)gotent = ofl->ofl_osplt->os_shdr->sh_addr + plt_off +
186 	    M_PLT_INSSIZE;
187 	if (bswap)
188 		/* LINTED */
189 		*(Word *)gotent = ld_bswap_Word(*(Word *)gotent);
190 
191 	/*
192 	 * If '-z noreloc' is specified - skip the do_reloc_ld
193 	 * stage.
194 	 */
195 	if (!OFL_DO_RELOC(ofl))
196 		return (1);
197 
198 	/*
199 	 * patchup:
200 	 *	jmpq	*name1@gotpcrel(%rip)
201 	 *
202 	 * NOTE: 0x06 represents next instruction.
203 	 */
204 	val1 = (ofl->ofl_osgot->os_shdr->sh_addr + got_off) -
205 	    (ofl->ofl_osplt->os_shdr->sh_addr + plt_off) - 0x06;
206 
207 	if (do_reloc_ld(R_AMD64_GOTPCREL, &pltent[0x02],
208 	    &val1, MSG_ORIG(MSG_SYM_PLTENT),
209 	    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
210 		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
211 		    sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
212 		return (S_ERROR);
213 	}
214 
215 	/*
216 	 * patchup:
217 	 *	pushq	$pltndx
218 	 */
219 	val1 = (Xword)(sdp->sd_aux->sa_PLTndx - 1);
220 
221 	if (do_reloc_ld(R_AMD64_32, &pltent[0x07],
222 	    &val1, MSG_ORIG(MSG_SYM_PLTENT),
223 	    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
224 		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
225 		    sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
226 		return (S_ERROR);
227 	}
228 
229 	/*
230 	 * patchup:
231 	 *	jmpq	.plt0(%rip)
232 	 * NOTE: 0x10 represents next instruction. The rather complex
233 	 * series of casts is necessary to sign extend an offset into
234 	 * a 64-bit value while satisfying various compiler error
235 	 * checks.  Handle with care.
236 	 */
237 	val1 = (Xword)((intptr_t)((uintptr_t)plt0 -
238 	    (uintptr_t)(&pltent[0x10])));
239 
240 	if (do_reloc_ld(R_AMD64_PC32, &pltent[0x0c],
241 	    &val1, MSG_ORIG(MSG_SYM_PLTENT),
242 	    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
243 		eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
244 		    sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
245 		return (S_ERROR);
246 	}
247 
248 	return (1);
249 }
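
/*
 * Sketch of the first patch above, with addresses assumed purely for
 * illustration: if this entry's GOT slot lives at 0x402018 and the PLT
 * entry itself starts at 0x401010, then
 *	val1 = 0x402018 - 0x401010 - 0x06 = 0x1002
 * is the rip-relative displacement stored at pltent[0x02], since %rip
 * points at the instruction following the 6-byte jmpq when it executes.
 */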
250 
251 static uintptr_t
252 ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl)
253 {
254 	Os_desc *	relosp, * osp = 0;
255 	Word		ndx;
256 	Xword		roffset, value;
257 	Sxword		raddend;
258 	Rela		rea;
259 	char		*relbits;
260 	Sym_desc *	sdp, * psym = (Sym_desc *)0;
261 	int		sectmoved = 0;
262 
263 	raddend = orsp->rel_raddend;
264 	sdp = orsp->rel_sym;
265 
266 	/*
267 	 * If the section this relocation is against has been discarded
268 	 * (-zignore), then also discard (skip) the relocation itself.
269 	 */
270 	if (orsp->rel_isdesc && ((orsp->rel_flags &
271 	    (FLG_REL_GOT | FLG_REL_BSS | FLG_REL_PLT | FLG_REL_NOINFO)) == 0) &&
272 	    (orsp->rel_isdesc->is_flags & FLG_IS_DISCARD)) {
273 		DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml, M_MACH, orsp));
274 		return (1);
275 	}
276 
277 	/*
278 	 * If this is a relocation against a move table, or expanded move
279 	 * table, adjust the relocation entries.
280 	 */
281 	if (orsp->rel_move)
282 		ld_adj_movereloc(ofl, orsp);
283 
284 	/*
285 	 * If this is a relocation against a section then we need to adjust the
286 	 * raddend field to compensate for the new position of the input section
287 	 * within the new output section.
288 	 */
289 	if (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION) {
290 		if (ofl->ofl_parsyms &&
291 		    (sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
292 		    /* LINTED */
293 		    (psym = ld_am_I_partial(orsp, orsp->rel_raddend))) {
294 			DBG_CALL(Dbg_move_outsctadj(ofl->ofl_lml, psym));
295 			sectmoved = 1;
296 			if (ofl->ofl_flags & FLG_OF_RELOBJ)
297 				raddend = psym->sd_sym->st_value;
298 			else
299 				raddend = psym->sd_sym->st_value -
300 				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
301 			/* LINTED */
302 			raddend += (Off)_elf_getxoff(psym->sd_isc->is_indata);
303 			if (psym->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
304 				raddend +=
305 				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
306 		} else {
307 			/* LINTED */
308 			raddend += (Off)_elf_getxoff(sdp->sd_isc->is_indata);
309 			if (sdp->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
310 				raddend +=
311 				    sdp->sd_isc->is_osdesc->os_shdr->sh_addr;
312 		}
313 	}
314 
315 	value = sdp->sd_sym->st_value;
316 
317 	if (orsp->rel_flags & FLG_REL_GOT) {
318 		/*
319 		 * Note: for GOT relative relocations on amd64
320 		 *	 we discard the addend.  It was relevant
321 		 *	 to the reference - not to the data item
322 		 *	 being referenced (i.e., the -4 addend).
323 		 */
324 		raddend = 0;
325 		osp = ofl->ofl_osgot;
326 		roffset = ld_calc_got_offset(orsp, ofl);
327 
328 	} else if (orsp->rel_flags & FLG_REL_PLT) {
329 		/*
330 		 * Note that relocations for PLTs actually
331 		 * cause a relocation against the GOT.
332 		 */
333 		osp = ofl->ofl_osplt;
334 		roffset = (ofl->ofl_osgot->os_shdr->sh_addr) +
335 		    sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
336 		raddend = 0;
337 		if (plt_entry(ofl, sdp) == S_ERROR)
338 			return (S_ERROR);
339 
340 	} else if (orsp->rel_flags & FLG_REL_BSS) {
341 		/*
342 		 * This must be an R_AMD64_COPY.  For these, set the roffset to
343 		 * point to the new symbol's location.
344 		 */
345 		osp = ofl->ofl_isbss->is_osdesc;
346 		roffset = value;
347 
348 		/*
349 		 * The raddend doesn't mean anything in an R_AMD64_COPY
350 		 * relocation.  Null it out because it can confuse people.
351 		 */
352 		raddend = 0;
353 	} else {
354 		osp = orsp->rel_osdesc;
355 
356 		/*
357 		 * Calculate virtual offset of reference point; equals offset
358 		 * into section + vaddr of section for loadable sections, or
359 		 * offset plus section displacement for nonloadable sections.
360 		 */
361 		roffset = orsp->rel_roffset +
362 		    (Off)_elf_getxoff(orsp->rel_isdesc->is_indata);
363 		if (!(ofl->ofl_flags & FLG_OF_RELOBJ))
364 			roffset += orsp->rel_isdesc->is_osdesc->
365 			    os_shdr->sh_addr;
366 	}
367 
368 	if ((osp == 0) || ((relosp = osp->os_relosdesc) == 0))
369 		relosp = ofl->ofl_osrel;
370 
371 	/*
372 	 * Assign the symbol's index for the output relocation.  If the
373 	 * relocation refers to a SECTION symbol then its index is based upon
374 	 * the output section's symbol index.  Otherwise the index can be
375 	 * derived from the symbol's index itself.
376 	 */
377 	if (orsp->rel_rtype == R_AMD64_RELATIVE)
378 		ndx = STN_UNDEF;
379 	else if ((orsp->rel_flags & FLG_REL_SCNNDX) ||
380 	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION)) {
381 		if (sectmoved == 0) {
382 			/*
383 			 * Check for a null input section. This can
384 			 * occur if this relocation references a symbol
385 			 * generated by sym_add_sym().
386 			 */
387 			if (sdp->sd_isc && sdp->sd_isc->is_osdesc)
388 				ndx = sdp->sd_isc->is_osdesc->os_identndx;
389 			else
390 				ndx = sdp->sd_shndx;
391 		} else
392 			ndx = ofl->ofl_parexpnndx;
393 	} else
394 		ndx = sdp->sd_symndx;
395 
396 	/*
397 	 * Add the symbol's 'value' to the addend field.
398 	 */
399 	if (orsp->rel_flags & FLG_REL_ADVAL)
400 		raddend += value;
401 
402 	/*
403 	 * The addend field for R_AMD64_DTPMOD64 means nothing.  The addend
404 	 * is propagated in the corresponding R_AMD64_DTPOFF64 relocation.
405 	 */
406 	if (orsp->rel_rtype == R_AMD64_DTPMOD64)
407 		raddend = 0;
408 
409 	relbits = (char *)relosp->os_outdata->d_buf;
410 
411 	rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
412 	rea.r_offset = roffset;
413 	rea.r_addend = raddend;
414 	DBG_CALL(Dbg_reloc_out(ofl, ELF_DBG_LD, SHT_RELA, &rea, relosp->os_name,
415 	    orsp->rel_sname));
416 
417 	/*
418 	 * Assert we haven't walked off the end of our relocation table.
419 	 */
420 	assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);
421 
422 	(void) memcpy((relbits + relosp->os_szoutrels),
423 	    (char *)&rea, sizeof (Rela));
424 	relosp->os_szoutrels += (Xword)sizeof (Rela);
425 
426 	/*
427 	 * Determine if this relocation is against a non-writable, allocatable
428 	 * section.  If so we may need to provide a text relocation diagnostic.
429 	 * Note that relocations against the .plt (R_AMD64_JUMP_SLOT) actually
430 	 * result in modifications to the .got.
431 	 */
432 	if (orsp->rel_rtype == R_AMD64_JUMP_SLOT)
433 		osp = ofl->ofl_osgot;
434 
435 	ld_reloc_remain_entry(orsp, osp, ofl);
436 	return (1);
437 }
438 
439 /*
440  * amd64 Instructions for TLS processing
441  */
442 static uchar_t tlsinstr_gd_ie[] = {
443 	/*
444 	 *	0x00 movq %fs:0, %rax
445 	 */
446 	0x64, 0x48, 0x8b, 0x04, 0x25,
447 	0x00, 0x00, 0x00, 0x00,
448 	/*
449 	 *	0x09 addq x@gottpoff(%rip), %rax
450 	 */
451 	0x48, 0x03, 0x05, 0x00, 0x00,
452 	0x00, 0x00
453 };
454 
455 static uchar_t tlsinstr_gd_le[] = {
456 	/*
457 	 *	0x00 movq %fs:0, %rax
458 	 */
459 	0x64, 0x48, 0x8b, 0x04, 0x25,
460 	0x00, 0x00, 0x00, 0x00,
461 	/*
462 	 *	0x09 leaq x@tpoff(%rax), %rax
463 	 */
464 	0x48, 0x8d, 0x80, 0x00, 0x00,
465 	0x00, 0x00
466 };
467 
468 static uchar_t tlsinstr_ld_le[] = {
469 	/*
470 	 * .byte 0x66
471 	 */
472 	0x66,
473 	/*
474 	 * .byte 0x66
475 	 */
476 	0x66,
477 	/*
478 	 * .byte 0x66
479 	 */
480 	0x66,
481 	/*
482 	 * movq %fs:0, %rax
483 	 */
484 	0x64, 0x48, 0x8b, 0x04, 0x25,
485 	0x00, 0x00, 0x00, 0x00
486 };
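
/*
 * Each template above is the same length as the code sequence it replaces
 * in tls_fixups(): tlsinstr_gd_ie and tlsinstr_gd_le are 16 bytes, matching
 * the 0x10-byte GD call sequence, and tlsinstr_ld_le is 12 bytes, matching
 * the 0x0c-byte LD call sequence.  This is what allows the transitions to
 * be performed with a simple memcpy() over the original instructions.
 */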
487 
488 
489 static Fixupret
490 tls_fixups(Ofl_desc *ofl, Rel_desc *arsp)
491 {
492 	Sym_desc	*sdp = arsp->rel_sym;
493 	Word		rtype = arsp->rel_rtype;
494 	uchar_t		*offset;
495 
496 	offset = (uchar_t *)((uintptr_t)arsp->rel_roffset +
497 	    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->is_indata) +
498 	    (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf);
499 
500 	if (sdp->sd_ref == REF_DYN_NEED) {
501 		/*
502 		 * IE reference model
503 		 */
504 		switch (rtype) {
505 		case R_AMD64_TLSGD:
506 			/*
507 			 *  GD -> IE
508 			 *
509 			 * Transition:
510 			 *	0x00 .byte 0x66
511 			 *	0x01 leaq x@tlsgd(%rip), %rdi
512 			 *	0x08 .word 0x6666
513 			 *	0x0a rex64
514 			 *	0x0b call __tls_get_addr@plt
515 			 *	0x10
516 			 * To:
517 			 *	0x00 movq %fs:0, %rax
518 			 *	0x09 addq x@gottpoff(%rip), %rax
519 			 *	0x10
520 			 */
521 			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
522 			    R_AMD64_GOTTPOFF, arsp));
523 			arsp->rel_rtype = R_AMD64_GOTTPOFF;
524 			arsp->rel_roffset += 8;
525 			arsp->rel_raddend = (Sxword)-4;
526 
527 			/*
528 			 * Adjust 'offset' to beginning of instruction
529 			 * sequence.
530 			 */
531 			offset -= 4;
532 			(void) memcpy(offset, tlsinstr_gd_ie,
533 			    sizeof (tlsinstr_gd_ie));
534 			return (FIX_RELOC);
535 
536 		case R_AMD64_PLT32:
537 			/*
538 			 * Fixup done via the TLS_GD relocation.
539 			 */
540 			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
541 			    R_AMD64_NONE, arsp));
542 			return (FIX_DONE);
543 		}
544 	}
545 
546 	/*
547 	 * LE reference model
548 	 */
549 	switch (rtype) {
550 	case R_AMD64_TLSGD:
551 		/*
552 		 * GD -> LE
553 		 *
554 		 * Transition:
555 		 *	0x00 .byte 0x66
556 		 *	0x01 leaq x@tlsgd(%rip), %rdi
557 		 *	0x08 .word 0x6666
558 		 *	0x0a rex64
559 		 *	0x0b call __tls_get_addr@plt
560 		 *	0x10
561 		 * To:
562 		 *	0x00 movq %fs:0, %rax
563 		 *	0x09 leaq x@tpoff(%rax), %rax
564 		 *	0x10
565 		 */
566 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
567 		    R_AMD64_TPOFF32, arsp));
568 		arsp->rel_rtype = R_AMD64_TPOFF32;
569 		arsp->rel_roffset += 8;
570 		arsp->rel_raddend = 0;
571 
572 		/*
573 		 * Adjust 'offset' to beginning of instruction sequence.
574 		 */
575 		offset -= 4;
576 		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
577 		return (FIX_RELOC);
578 
579 	case R_AMD64_GOTTPOFF:
580 		/*
581 		 * IE -> LE
582 		 *
583 		 * Transition:
584 		 *	0x00 movq %fs:0, %rax
585 		 *	0x09 addq x@gottpoff(%rip), %rax
586 		 *	0x10
587 		 * To:
588 		 *	0x00 movq %fs:0, %rax
589 		 *	0x09 leaq x@tpoff(%rax), %rax
590 		 *	0x10
591 		 */
592 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
593 		    R_AMD64_TPOFF32, arsp));
594 		arsp->rel_rtype = R_AMD64_TPOFF32;
595 		arsp->rel_raddend = 0;
596 
597 		/*
598 		 * Adjust 'offset' to beginning of instruction sequence.
599 		 */
600 		offset -= 12;
601 
602 		/*
603 		 * Same code sequence used in the GD -> LE transition.
604 		 */
605 		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
606 		return (FIX_RELOC);
607 
608 	case R_AMD64_TLSLD:
609 		/*
610 		 * LD -> LE
611 		 *
612 		 * Transition:
613 		 *	0x00 leaq x1@tlsld(%rip), %rdi
614 		 *	0x07 call __tls_get_addr@plt
615 		 *	0x0c
616 		 * To:
617 		 *	0x00 .byte 0x66
618 		 *	0x01 .byte 0x66
619 		 *	0x02 .byte 0x66
620 		 *	0x03 movq %fs:0, %rax
621 		 */
622 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
623 		    R_AMD64_NONE, arsp));
624 		offset -= 3;
625 		(void) memcpy(offset, tlsinstr_ld_le, sizeof (tlsinstr_ld_le));
626 		return (FIX_DONE);
627 
628 	case R_AMD64_DTPOFF32:
629 		/*
630 		 * LD -> LE
631 		 *
632 		 * Transition:
633 		 *	0x00 leaq x1@dtpoff(%rax), %rcx
634 		 * To:
635 		 *	0x00 leaq x1@tpoff(%rax), %rcx
636 		 */
637 		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
638 		    R_AMD64_TPOFF32, arsp));
639 		arsp->rel_rtype = R_AMD64_TPOFF32;
640 		arsp->rel_raddend = 0;
641 		return (FIX_RELOC);
642 	}
643 
644 	return (FIX_RELOC);
645 }
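
/*
 * To summarize the flow above: a symbol that still binds to a shared object
 * at runtime (REF_DYN_NEED) can only be relaxed as far as the IE model,
 * while a symbol resolved within the object being built is relaxed all the
 * way to LE.  FIX_RELOC tells ld_do_activerelocs() to continue processing
 * the (possibly retyped) relocation, while FIX_DONE indicates the
 * instruction rewrite was the whole job.
 */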
646 
647 static uintptr_t
648 ld_do_activerelocs(Ofl_desc *ofl)
649 {
650 	Rel_desc	*arsp;
651 	Rel_cache	*rcp;
652 	Aliste		idx;
653 	uintptr_t	return_code = 1;
654 	ofl_flag_t	flags = ofl->ofl_flags;
655 
656 	if (ofl->ofl_actrels)
657 		DBG_CALL(Dbg_reloc_doact_title(ofl->ofl_lml));
658 
659 	/*
660 	 * Process active relocations.
661 	 */
662 	for (APLIST_TRAVERSE(ofl->ofl_actrels, idx, rcp)) {
663 		/* LINTED */
664 		for (arsp = (Rel_desc *)(rcp + 1);
665 		    arsp < rcp->rc_free; arsp++) {
666 			uchar_t		*addr;
667 			Xword 		value;
668 			Sym_desc	*sdp;
669 			const char	*ifl_name;
670 			Xword		refaddr;
671 			int		moved = 0;
672 			Gotref		gref;
673 
674 			/*
675 			 * If the section this relocation is against has been
676 			 * discarded (-zignore), then discard (skip) the
677 			 * relocation itself.
678 			 */
679 			if ((arsp->rel_isdesc->is_flags & FLG_IS_DISCARD) &&
680 			    ((arsp->rel_flags &
681 			    (FLG_REL_GOT | FLG_REL_BSS |
682 			    FLG_REL_PLT | FLG_REL_NOINFO)) == 0)) {
683 				DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml,
684 				    M_MACH, arsp));
685 				continue;
686 			}
687 
688 			/*
689 			 * We determine what the 'got reference'
690 			 * model (if required) is at this point.  This
691 			 * needs to be done before tls_fixup() since
692 			 * needs to be done before tls_fixups() since
693 			 *
694 			 * The got table entries have already been assigned,
695 			 * and we bind to those initial entries.
696 			 */
697 			if (arsp->rel_flags & FLG_REL_DTLS)
698 				gref = GOT_REF_TLSGD;
699 			else if (arsp->rel_flags & FLG_REL_MTLS)
700 				gref = GOT_REF_TLSLD;
701 			else if (arsp->rel_flags & FLG_REL_STLS)
702 				gref = GOT_REF_TLSIE;
703 			else
704 				gref = GOT_REF_GENERIC;
705 
706 			/*
707 			 * Perform any required TLS fixups.
708 			 */
709 			if (arsp->rel_flags & FLG_REL_TLSFIX) {
710 				Fixupret	ret;
711 
712 				if ((ret = tls_fixups(ofl, arsp)) == FIX_ERROR)
713 					return (S_ERROR);
714 				if (ret == FIX_DONE)
715 					continue;
716 			}
717 
718 			/*
719 			 * If this is a relocation against a move table, or
720 			 * expanded move table, adjust the relocation entries.
721 			 */
722 			if (arsp->rel_move)
723 				ld_adj_movereloc(ofl, arsp);
724 
725 			sdp = arsp->rel_sym;
726 			refaddr = arsp->rel_roffset +
727 			    (Off)_elf_getxoff(arsp->rel_isdesc->is_indata);
728 
729 			if ((arsp->rel_flags & FLG_REL_CLVAL) ||
730 			    (arsp->rel_flags & FLG_REL_GOTCL))
731 				value = 0;
732 			else if (ELF_ST_TYPE(sdp->sd_sym->st_info) ==
733 			    STT_SECTION) {
734 				Sym_desc	*sym;
735 
736 				/*
737 				 * The value for a symbol pointing to a SECTION
738 				 * is based on that section's position.
739 				 */
740 				if ((sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
741 				    /* LINTED */
742 				    (sym = ld_am_I_partial(arsp,
743 				    arsp->rel_raddend))) {
744 					/*
745 					 * The symbol was moved, so adjust
746 					 * the value relative to the new
747 					 * section.
748 					 */
749 					value = sym->sd_sym->st_value;
750 					moved = 1;
751 
752 					/*
753 					 * The original raddend covers the
754 					 * displacement from the section start
755 					 * to the desired address. The value
756 					 * computed above gets us from the
757 					 * section start to the start of the
758 					 * symbol range. Adjust the old raddend
759 					 * to remove the offset from section
760 					 * start to symbol start, leaving the
761 					 * displacement within the range of
762 					 * the symbol.
763 					 */
764 					arsp->rel_raddend -=
765 					    sym->sd_osym->st_value;
766 				} else {
767 					value = _elf_getxoff(
768 					    sdp->sd_isc->is_indata);
769 					if (sdp->sd_isc->is_shdr->sh_flags &
770 					    SHF_ALLOC)
771 						value +=
772 						    sdp->sd_isc->is_osdesc->
773 						    os_shdr->sh_addr;
774 				}
775 				if (sdp->sd_isc->is_shdr->sh_flags & SHF_TLS)
776 					value -= ofl->ofl_tlsphdr->p_vaddr;
777 
778 			} else if (IS_SIZE(arsp->rel_rtype)) {
779 				/*
780 				 * Size relocations require the symbol's size.
781 				 */
782 				value = sdp->sd_sym->st_size;
783 			} else {
784 				/*
785 				 * Otherwise the value is the symbol's value.
786 				 */
787 				value = sdp->sd_sym->st_value;
788 			}
789 
790 			/*
791 			 * Relocation against the GLOBAL_OFFSET_TABLE.
792 			 */
793 			if (arsp->rel_flags & FLG_REL_GOT)
794 				arsp->rel_osdesc = ofl->ofl_osgot;
795 
796 			/*
797 			 * If loadable, and not producing a relocatable object,
798 			 * add the section's virtual address to the reference
799 			 * address.
800 			 */
801 			if ((arsp->rel_flags & FLG_REL_LOAD) &&
802 			    ((flags & FLG_OF_RELOBJ) == 0))
803 				refaddr += arsp->rel_isdesc->is_osdesc->
804 				    os_shdr->sh_addr;
805 
806 			/*
807 			 * If this entry has a PLT assigned to it, its
808 			 * value is actually the address of the PLT (and
809 			 * not the address of the function).
810 			 */
811 			if (IS_PLT(arsp->rel_rtype)) {
812 				if (sdp->sd_aux && sdp->sd_aux->sa_PLTndx)
813 					value = ld_calc_plt_addr(sdp, ofl);
814 			}
815 
816 			/*
817 			 * Add the relocation's addend to the value.  Add an extra
818 			 * relocation addend if needed.
819 			 *
820 			 * Note: for GOT relative relocations on amd64
821 			 *	 we discard the addend.  It was relevant
822 			 *	 to the reference - not to the data item
823 			 *	 being referenced (i.e., the -4 addend).
824 			 */
825 			if ((arsp->rel_flags & FLG_REL_GOT) == 0)
826 				value += arsp->rel_raddend;
827 
828 			/*
829 			 * Determine whether the value needs further adjustment.
830 			 * Filter through the attributes of the relocation to
831 			 * determine what adjustment is required.  Note, many
832 			 * of the following cases are only applicable when a
833 			 * .got is present.  As a .got is not generated when a
834 			 * relocatable object is being built, any adjustments
835 			 * that require a .got need to be skipped.
836 			 */
837 			if ((arsp->rel_flags & FLG_REL_GOT) &&
838 			    ((flags & FLG_OF_RELOBJ) == 0)) {
839 				Xword		R1addr;
840 				uintptr_t	R2addr;
841 				Word		gotndx;
842 				Gotndx		*gnp;
843 
844 				/*
845 				 * Perform relocation against GOT table.  Since
846 				 * this doesn't fit exactly into a relocation
847 				 * we place the appropriate value in the GOT
848 				 * directly.
849 				 *
850 				 * Calculate offset into GOT at which to apply
851 				 * the relocation.
852 				 */
853 				gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref,
854 				    ofl, arsp);
855 				assert(gnp);
856 
857 				if (arsp->rel_rtype == R_AMD64_DTPOFF64)
858 					gotndx = gnp->gn_gotndx + 1;
859 				else
860 					gotndx = gnp->gn_gotndx;
861 
862 				R1addr = (Xword)(gotndx * M_GOT_ENTSIZE);
863 
864 				/*
865 				 * Add the offset of the GOT's data.
866 				 */
867 				R2addr = R1addr + (uintptr_t)
868 				    arsp->rel_osdesc->os_outdata->d_buf;
869 
870 				DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml,
871 				    ELF_DBG_LD_ACT, M_MACH, SHT_RELA,
872 				    arsp->rel_rtype, R1addr, value,
873 				    arsp->rel_sname, arsp->rel_osdesc));
874 
875 				/*
876 				 * And do it.
877 				 */
878 				if (ofl->ofl_flags1 & FLG_OF1_ENCDIFF)
879 					*(Xword *)R2addr =
880 					    ld_bswap_Xword(value);
881 				else
882 					*(Xword *)R2addr = value;
883 				continue;
884 
885 			} else if (IS_GOT_BASED(arsp->rel_rtype) &&
886 			    ((flags & FLG_OF_RELOBJ) == 0)) {
887 				value -= ofl->ofl_osgot->os_shdr->sh_addr;
888 
889 			} else if (IS_GOTPCREL(arsp->rel_rtype) &&
890 			    ((flags & FLG_OF_RELOBJ) == 0)) {
891 				Gotndx *gnp;
892 
893 				/*
894 				 * Calculation:
895 				 *	G + GOT + A - P
896 				 */
897 				gnp = ld_find_got_ndx(sdp->sd_GOTndxs,
898 				    gref, ofl, arsp);
899 				assert(gnp);
900 				value = (Xword)(ofl->ofl_osgot->os_shdr->
901 				    sh_addr) + ((Xword)gnp->gn_gotndx *
902 				    M_GOT_ENTSIZE) + arsp->rel_raddend -
903 				    refaddr;
904 
905 			} else if (IS_GOT_PC(arsp->rel_rtype) &&
906 			    ((flags & FLG_OF_RELOBJ) == 0)) {
907 				value = (Xword)(ofl->ofl_osgot->os_shdr->
908 				    sh_addr) - refaddr + arsp->rel_raddend;
909 
910 			} else if ((IS_PC_RELATIVE(arsp->rel_rtype)) &&
911 			    (((flags & FLG_OF_RELOBJ) == 0) ||
912 			    (arsp->rel_osdesc == sdp->sd_isc->is_osdesc))) {
913 				value -= refaddr;
914 
915 			} else if (IS_TLS_INS(arsp->rel_rtype) &&
916 			    IS_GOT_RELATIVE(arsp->rel_rtype) &&
917 			    ((flags & FLG_OF_RELOBJ) == 0)) {
918 				Gotndx	*gnp;
919 
920 				gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref,
921 				    ofl, arsp);
922 				assert(gnp);
923 				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;
924 
925 			} else if (IS_GOT_RELATIVE(arsp->rel_rtype) &&
926 			    ((flags & FLG_OF_RELOBJ) == 0)) {
927 				Gotndx *gnp;
928 
929 				gnp = ld_find_got_ndx(sdp->sd_GOTndxs,
930 				    gref, ofl, arsp);
931 				assert(gnp);
932 				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;
933 
934 			} else if ((arsp->rel_flags & FLG_REL_STLS) &&
935 			    ((flags & FLG_OF_RELOBJ) == 0)) {
936 				Xword	tlsstatsize;
937 
938 				/*
939 				 * This is the LE TLS reference model.  The static
940 				 * offset is hard-coded.
941 				 */
942 				tlsstatsize =
943 				    S_ROUND(ofl->ofl_tlsphdr->p_memsz,
944 				    M_TLSSTATALIGN);
945 				value = tlsstatsize - value;
946 
947 				/*
948 				 * Since this code is fixed up, it assumes a
949 				 * negative offset that can be added to the
950 				 * thread pointer.
951 				 */
952 				if (arsp->rel_rtype == R_AMD64_TPOFF32)
953 					value = -value;
954 			}
955 
956 			if (arsp->rel_isdesc->is_file)
957 				ifl_name = arsp->rel_isdesc->is_file->ifl_name;
958 			else
959 				ifl_name = MSG_INTL(MSG_STR_NULL);
960 
961 			/*
962 			 * Make sure we have data to relocate.  Compiler and
963 			 * assembler developers have been known to generate
964 			 * relocations against invalid sections (normally .bss),
965 			 * so for their benefit give them sufficient information
966 			 * to help analyze the problem.  End users should never
967 			 * see this.
968 			 */
969 			if (arsp->rel_isdesc->is_indata->d_buf == 0) {
970 				Conv_inv_buf_t inv_buf;
971 
972 				eprintf(ofl->ofl_lml, ERR_FATAL,
973 				    MSG_INTL(MSG_REL_EMPTYSEC),
974 				    conv_reloc_amd64_type(arsp->rel_rtype,
975 				    0, &inv_buf), ifl_name,
976 				    demangle(arsp->rel_sname),
977 				    EC_WORD(arsp->rel_isdesc->is_scnndx),
978 				    arsp->rel_isdesc->is_name);
979 				return (S_ERROR);
980 			}
981 
982 			/*
983 			 * Get the address of the data item we need to modify.
984 			 */
985 			addr = (uchar_t *)((uintptr_t)arsp->rel_roffset +
986 			    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->
987 			    is_indata));
988 
989 			DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml, ELF_DBG_LD_ACT,
990 			    M_MACH, SHT_RELA, arsp->rel_rtype, EC_NATPTR(addr),
991 			    value, arsp->rel_sname, arsp->rel_osdesc));
992 			addr += (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf;
993 
994 			if ((((uintptr_t)addr - (uintptr_t)ofl->ofl_nehdr) >
995 			    ofl->ofl_size) || (arsp->rel_roffset >
996 			    arsp->rel_osdesc->os_shdr->sh_size)) {
997 				int		class;
998 				Conv_inv_buf_t inv_buf;
999 
1000 				if (((uintptr_t)addr -
1001 				    (uintptr_t)ofl->ofl_nehdr) > ofl->ofl_size)
1002 					class = ERR_FATAL;
1003 				else
1004 					class = ERR_WARNING;
1005 
1006 				eprintf(ofl->ofl_lml, class,
1007 				    MSG_INTL(MSG_REL_INVALOFFSET),
1008 				    conv_reloc_amd64_type(arsp->rel_rtype,
1009 				    0, &inv_buf), ifl_name,
1010 				    EC_WORD(arsp->rel_isdesc->is_scnndx),
1011 				    arsp->rel_isdesc->is_name,
1012 				    demangle(arsp->rel_sname),
1013 				    EC_ADDR((uintptr_t)addr -
1014 				    (uintptr_t)ofl->ofl_nehdr));
1015 
1016 				if (class == ERR_FATAL) {
1017 					return_code = S_ERROR;
1018 					continue;
1019 				}
1020 			}
1021 
1022 			/*
1023 			 * The relocation is additive.  Ignore the previous
1024 			 * symbol value if this local partial symbol is
1025 			 * expanded.
1026 			 */
1027 			if (moved)
1028 				value -= *addr;
1029 
1030 			/*
1031 			 * If '-z noreloc' is specified - skip the do_reloc_ld
1032 			 * stage.
1033 			 */
1034 			if (OFL_DO_RELOC(ofl)) {
1035 				/*
1036 				 * If this is a PROGBITS section and the
1037 				 * running linker has a different byte order
1038 				 * than the target host, tell do_reloc_ld()
1039 				 * to swap bytes.
1040 				 */
1041 				if (do_reloc_ld((uchar_t)arsp->rel_rtype,
1042 				    addr, &value, arsp->rel_sname, ifl_name,
1043 				    OFL_SWAP_RELOC_DATA(ofl, arsp),
1044 				    ofl->ofl_lml) == 0)
1045 					return_code = S_ERROR;
1046 			}
1047 		}
1048 	}
1049 	return (return_code);
1050 }
1051 
1052 static uintptr_t
1053 ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
1054 {
1055 	Rel_desc	*orsp;
1056 	Rel_cache	*rcp;
1057 	Sym_desc	*sdp = rsp->rel_sym;
1058 	static size_t	nextsize = 0;
1059 
1060 	/*
1061 	 * Static executables *do not* want any relocations against them.
1062 	 * Since our engine still creates relocations against a WEAK UNDEFINED
1063 	 * symbol in a static executable, it's best to disable them here
1064 	 * instead of throughout the relocation code.
1065 	 */
1066 	if (OFL_IS_STATIC_EXEC(ofl))
1067 		return (1);
1068 
1069 	/*
1070 	 * Obtain the new available relocation cache entry.
1071 	 */
1072 	if ((rcp = ld_add_rel_cache(ofl, &ofl->ofl_outrels, &nextsize,
1073 	    REL_LOIDESCNO, REL_HOIDESCNO)) == (Rel_cache *)S_ERROR)
1074 		return (S_ERROR);
1075 
1076 	orsp = rcp->rc_free;
1077 
1078 	/*
1079 	 * If we are adding an output relocation against a section
1080 	 * symbol (non-RELATIVE) then mark that section.  These sections
1081 	 * will be added to the .dynsym symbol table.
1082 	 */
1083 	if (sdp && (rsp->rel_rtype != M_R_RELATIVE) &&
1084 	    ((flags & FLG_REL_SCNNDX) ||
1085 	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION))) {
1086 
1087 		/*
1088 		 * If this is a COMMON symbol, no output section exists
1089 		 * yet (it's created as part of sym_validate()).  So we
1090 		 * mark here that when the section is created it should
1091 		 * be tagged with the FLG_OS_OUTREL flag.
1092 		 */
1093 		if ((sdp->sd_flags & FLG_SY_SPECSEC) &&
1094 		    (sdp->sd_sym->st_shndx == SHN_COMMON)) {
1095 			if (ELF_ST_TYPE(sdp->sd_sym->st_info) != STT_TLS)
1096 				ofl->ofl_flags1 |= FLG_OF1_BSSOREL;
1097 			else
1098 				ofl->ofl_flags1 |= FLG_OF1_TLSOREL;
1099 		} else {
1100 			Os_desc	*osp = sdp->sd_isc->is_osdesc;
1101 
1102 			if (osp && ((osp->os_flags & FLG_OS_OUTREL) == 0)) {
1103 				ofl->ofl_dynshdrcnt++;
1104 				osp->os_flags |= FLG_OS_OUTREL;
1105 			}
1106 		}
1107 	}
1108 
1109 	*orsp = *rsp;
1110 	orsp->rel_flags |= flags;
1111 
1112 	rcp->rc_free++;
1113 	ofl->ofl_outrelscnt++;
1114 
1115 	if (flags & FLG_REL_GOT)
1116 		ofl->ofl_relocgotsz += (Xword)sizeof (Rela);
1117 	else if (flags & FLG_REL_PLT)
1118 		ofl->ofl_relocpltsz += (Xword)sizeof (Rela);
1119 	else if (flags & FLG_REL_BSS)
1120 		ofl->ofl_relocbsssz += (Xword)sizeof (Rela);
1121 	else if (flags & FLG_REL_NOINFO)
1122 		ofl->ofl_relocrelsz += (Xword)sizeof (Rela);
1123 	else
1124 		orsp->rel_osdesc->os_szoutrels += (Xword)sizeof (Rela);
1125 
1126 	if (orsp->rel_rtype == M_R_RELATIVE)
1127 		ofl->ofl_relocrelcnt++;
1128 
1129 	/*
1130 	 * We don't perform sorting on PLT relocations because
1131 	 * they have already been assigned a PLT index and if we
1132 	 * were to sort them we would have to re-assign the PLT indexes.
1133 	 */
1134 	if (!(flags & FLG_REL_PLT))
1135 		ofl->ofl_reloccnt++;
1136 
1137 	/*
1138 	 * Ensure a GLOBAL_OFFSET_TABLE is generated if required.
1139 	 */
1140 	if (IS_GOT_REQUIRED(orsp->rel_rtype))
1141 		ofl->ofl_flags |= FLG_OF_BLDGOT;
1142 
1143 	/*
1144 	 * Identify and possibly warn of a displacement relocation.
1145 	 */
1146 	if (orsp->rel_flags & FLG_REL_DISP) {
1147 		ofl->ofl_dtflags_1 |= DF_1_DISPRELPND;
1148 
1149 		if (ofl->ofl_flags & FLG_OF_VERBOSE)
1150 			ld_disp_errmsg(MSG_INTL(MSG_REL_DISPREL4), orsp, ofl);
1151 	}
1152 	DBG_CALL(Dbg_reloc_ors_entry(ofl->ofl_lml, ELF_DBG_LD, SHT_RELA,
1153 	    M_MACH, orsp));
1154 	return (1);
1155 }
1156 
1157 /*
1158  * Process a relocation against a LOCAL symbol.
1159  */
1160 static uintptr_t
1161 ld_reloc_local(Rel_desc * rsp, Ofl_desc * ofl)
1162 {
1163 	ofl_flag_t	flags = ofl->ofl_flags;
1164 	Sym_desc	*sdp = rsp->rel_sym;
1165 	Word		shndx = sdp->sd_sym->st_shndx;
1166 	Word		ortype = rsp->rel_rtype;
1167 
1168 	/*
1169 	 * if ((shared object) and (not pc relative relocation) and
1170 	 *    (not against ABS symbol))
1171 	 * then
1172 	 *	build R_AMD64_RELATIVE
1173 	 * fi
1174 	 */
1175 	if ((flags & FLG_OF_SHAROBJ) && (rsp->rel_flags & FLG_REL_LOAD) &&
1176 	    !(IS_PC_RELATIVE(rsp->rel_rtype)) && !(IS_SIZE(rsp->rel_rtype)) &&
1177 	    !(IS_GOT_BASED(rsp->rel_rtype)) &&
1178 	    !(rsp->rel_isdesc != NULL &&
1179 	    (rsp->rel_isdesc->is_shdr->sh_type == SHT_SUNW_dof)) &&
1180 	    (((sdp->sd_flags & FLG_SY_SPECSEC) == 0) ||
1181 	    (shndx != SHN_ABS) || (sdp->sd_aux && sdp->sd_aux->sa_symspec))) {
1182 
1183 		/*
1184 		 * R_AMD64_RELATIVE updates a 64-bit address.  If this
1185 		 * relocation isn't a 64-bit binding then we cannot
1186 		 * simplify it to a RELATIVE relocation.
1187 		 */
1188 		if (reloc_table[ortype].re_fsize != sizeof (Addr)) {
1189 			return (ld_add_outrel(0, rsp, ofl));
1190 		}
1191 
1192 		rsp->rel_rtype = R_AMD64_RELATIVE;
1193 		if (ld_add_outrel(FLG_REL_ADVAL, rsp, ofl) == S_ERROR)
1194 			return (S_ERROR);
1195 		rsp->rel_rtype = ortype;
1196 		return (1);
1197 	}
1198 
1199 	/*
1200 	 * If the relocation is against a 'non-allocatable' section
1201 	 * and we cannot resolve it now, then give a warning
1202 	 * message.
1203 	 *
1204 	 * We cannot resolve the symbol if either:
1205 	 *	a) it's undefined
1206 	 *	b) it's defined in a shared library and a
1207 	 *	   COPY relocation hasn't moved it to the executable
1208 	 *
1209 	 * Note: because we process all of the relocations against the
1210 	 *	text segment before any others - we know whether
1211 	 *	or not a copy relocation will be generated before
1212 	 *	we get here (see reloc_init()->reloc_segments()).
1213 	 */
1214 	if (!(rsp->rel_flags & FLG_REL_LOAD) &&
1215 	    ((shndx == SHN_UNDEF) ||
1216 	    ((sdp->sd_ref == REF_DYN_NEED) &&
1217 	    ((sdp->sd_flags & FLG_SY_MVTOCOMM) == 0)))) {
1218 		Conv_inv_buf_t inv_buf;
1219 
1220 		/*
1221 		 * If the relocation is against a SHT_SUNW_ANNOTATE
1222 		 * section - then silently ignore that the relocation
1223 		 * cannot be resolved.
1224 		 */
1225 		if (rsp->rel_osdesc &&
1226 		    (rsp->rel_osdesc->os_shdr->sh_type == SHT_SUNW_ANNOTATE))
1227 			return (0);
1228 		(void) eprintf(ofl->ofl_lml, ERR_WARNING,
1229 		    MSG_INTL(MSG_REL_EXTERNSYM),
1230 		    conv_reloc_amd64_type(rsp->rel_rtype, 0, &inv_buf),
1231 		    rsp->rel_isdesc->is_file->ifl_name,
1232 		    demangle(rsp->rel_sname), rsp->rel_osdesc->os_name);
1233 		return (1);
1234 	}
1235 
1236 	/*
1237 	 * Perform relocation.
1238 	 */
1239 	return (ld_add_actrel(NULL, rsp, ofl));
1240 }
1241 
1242 
1243 static uintptr_t
1244 ld_reloc_TLS(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
1245 {
1246 	Word		rtype = rsp->rel_rtype;
1247 	Sym_desc	*sdp = rsp->rel_sym;
1248 	ofl_flag_t	flags = ofl->ofl_flags;
1249 	Gotndx		*gnp;
1250 
1251 	/*
1252 	 * If we're building an executable, use either the IE or LE access
1253 	 * model.  If we're building a shared object, process any IE model.
1254 	 */
1255 	if ((flags & FLG_OF_EXEC) || (IS_TLS_IE(rtype))) {
1256 		/*
1257 		 * Set the DF_STATIC_TLS flag.
1258 		 */
1259 		ofl->ofl_dtflags |= DF_STATIC_TLS;
1260 
1261 		if (!local || ((flags & FLG_OF_EXEC) == 0)) {
1262 			/*
1263 			 * Assign a GOT entry for static TLS references.
1264 			 */
1265 			if ((gnp = ld_find_got_ndx(sdp->sd_GOTndxs,
1266 			    GOT_REF_TLSIE, ofl, rsp)) == NULL) {
1267 
1268 				if (ld_assign_got_TLS(local, rsp, ofl, sdp,
1269 				    gnp, GOT_REF_TLSIE, FLG_REL_STLS,
1270 				    rtype, R_AMD64_TPOFF64, 0) == S_ERROR)
1271 					return (S_ERROR);
1272 			}
1273 
1274 			/*
1275 			 * IE access model.
1276 			 */
1277 			if (IS_TLS_IE(rtype))
1278 				return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));
1279 
1280 			/*
1281 			 * Fixups are required for other executable models.
1282 			 */
1283 			return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
1284 			    rsp, ofl));
1285 		}
1286 
1287 		/*
1288 		 * LE access model.
1289 		 */
1290 		if (IS_TLS_LE(rtype))
1291 			return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));
1292 
1293 		return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
1294 		    rsp, ofl));
1295 	}
1296 
1297 	/*
1298 	 * Building a shared object.
1299 	 *
1300 	 * Assign a GOT entry for a dynamic TLS reference.
1301 	 */
1302 	if (IS_TLS_LD(rtype) && ((gnp = ld_find_got_ndx(sdp->sd_GOTndxs,
1303 	    GOT_REF_TLSLD, ofl, rsp)) == NULL)) {
1304 
1305 		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSLD,
1306 		    FLG_REL_MTLS, rtype, R_AMD64_DTPMOD64, NULL) == S_ERROR)
1307 			return (S_ERROR);
1308 
1309 	} else if (IS_TLS_GD(rtype) &&
1310 	    ((gnp = ld_find_got_ndx(sdp->sd_GOTndxs, GOT_REF_TLSGD,
1311 	    ofl, rsp)) == NULL)) {
1312 
1313 		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSGD,
1314 		    FLG_REL_DTLS, rtype, R_AMD64_DTPMOD64,
1315 		    R_AMD64_DTPOFF64) == S_ERROR)
1316 			return (S_ERROR);
1317 	}
1318 
1319 	if (IS_TLS_LD(rtype))
1320 		return (ld_add_actrel(FLG_REL_MTLS, rsp, ofl));
1321 
1322 	return (ld_add_actrel(FLG_REL_DTLS, rsp, ofl));
1323 }
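
/*
 * In short: an executable reduces every TLS reference to the IE or LE model
 * (DF_STATIC_TLS is set and, where needed, the fixups above rewrite the
 * code).  A shared object handles IE references the same way, but retains
 * the GD and LD models, reserving the corresponding GOT entries for the
 * runtime linker to fill.
 */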
1324 
1325 /* ARGSUSED5 */
1326 static uintptr_t
1327 ld_assign_got_ndx(Alist **alpp, Gotndx *pgnp, Gotref gref, Ofl_desc *ofl,
1328     Rel_desc *rsp, Sym_desc *sdp)
1329 {
1330 	Xword		raddend;
1331 	Gotndx		gn, *gnp;
1332 	Aliste		idx;
1333 	uint_t		gotents;
1334 
1335 	raddend = rsp->rel_raddend;
1336 	if (pgnp && (pgnp->gn_addend == raddend) && (pgnp->gn_gotref == gref))
1337 		return (1);
1338 
1339 	if ((gref == GOT_REF_TLSGD) || (gref == GOT_REF_TLSLD))
1340 		gotents = 2;
1341 	else
1342 		gotents = 1;
1343 
1344 	gn.gn_addend = raddend;
1345 	gn.gn_gotndx = ofl->ofl_gotcnt;
1346 	gn.gn_gotref = gref;
1347 
1348 	ofl->ofl_gotcnt += gotents;
1349 
1350 	if (gref == GOT_REF_TLSLD) {
1351 		if (ofl->ofl_tlsldgotndx == NULL) {
1352 			if ((gnp = libld_malloc(sizeof (Gotndx))) == NULL)
1353 				return (S_ERROR);
1354 			(void) memcpy(gnp, &gn, sizeof (Gotndx));
1355 			ofl->ofl_tlsldgotndx = gnp;
1356 		}
1357 		return (1);
1358 	}
1359 
1360 	idx = 0;
1361 	for (ALIST_TRAVERSE(*alpp, idx, gnp)) {
1362 		if (gnp->gn_addend > raddend)
1363 			break;
1364 	}
1365 
1366 	/*
1367 	 * GOT indexes are maintained on an Alist, where there is typically
1368 	 * only one index.  The usage of this list is to scan the list to find
1369 	 * an index, and then apply that index immediately to a relocation.
1370 	 * Thus there are no external references to these GOT index structures
1371 	 * that can be compromised by the Alist being reallocated.
1372 	 */
1373 	if (alist_insert(alpp, &gn, sizeof (Gotndx),
1374 	    AL_CNT_SDP_GOT, idx) == NULL)
1375 		return (S_ERROR);
1376 
1377 	return (1);
1378 }
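
/*
 * GD and LD references reserve two consecutive GOT slots (gotents = 2
 * above), corresponding to the module-ID/offset pair used by the dynamic
 * TLS models; this is also why ld_calc_got_offset() and the active
 * relocation code advance the index by one when handling R_AMD64_DTPOFF64.
 */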
1379 
1380 static void
1381 ld_assign_plt_ndx(Sym_desc * sdp, Ofl_desc *ofl)
1382 {
1383 	sdp->sd_aux->sa_PLTndx = 1 + ofl->ofl_pltcnt++;
1384 	sdp->sd_aux->sa_PLTGOTndx = ofl->ofl_gotcnt++;
1385 	ofl->ofl_flags |= FLG_OF_BLDGOT;
1386 }
1387 
1388 static uchar_t plt0_template[M_PLT_ENTSIZE] = {
1389 /* 0x00 PUSHQ GOT+8(%rip) */	0xff, 0x35, 0x00, 0x00, 0x00, 0x00,
1390 /* 0x06 JMP   *GOT+16(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
1391 /* 0x0c NOP */			0x90,
1392 /* 0x0d NOP */			0x90,
1393 /* 0x0e NOP */			0x90,
1394 /* 0x0f NOP */			0x90
1395 };
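
/*
 * PLT0 is the shared trampoline reached by the final "jmpq .plt0" of every
 * pltn_entry: it pushes the contents of GOT[1] and jumps through GOT[2],
 * the reserved GOT slots the runtime linker fills in so that it can
 * identify the object and enter its binding routine (see M_GOT_XLINKMAP
 * and M_GOT_XRTLD in ld_fillin_gotplt() below, which patches the two
 * rip-relative displacements).
 */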
1396 
1397 /*
1398  * Initializes .got[0] with the _DYNAMIC symbol value and fills in PLT0.
1399  */
1400 static uintptr_t
1401 ld_fillin_gotplt(Ofl_desc *ofl)
1402 {
1403 	int	bswap = (ofl->ofl_flags1 & FLG_OF1_ENCDIFF) != 0;
1404 
1405 	if (ofl->ofl_osgot) {
1406 		Sym_desc	*sdp;
1407 
1408 		if ((sdp = ld_sym_find(MSG_ORIG(MSG_SYM_DYNAMIC_U),
1409 		    SYM_NOHASH, NULL, ofl)) != NULL) {
1410 			uchar_t	*genptr;
1411 
1412 			genptr = ((uchar_t *)ofl->ofl_osgot->os_outdata->d_buf +
1413 			    (M_GOT_XDYNAMIC * M_GOT_ENTSIZE));
1414 			/* LINTED */
1415 			*(Xword *)genptr = sdp->sd_sym->st_value;
1416 			if (bswap)
1417 				/* LINTED */
1418 				*(Xword *)genptr =
1419 				    /* LINTED */
1420 				    ld_bswap_Xword(*(Xword *)genptr);
1421 		}
1422 	}
1423 
1424 	/*
1425 	 * Fill in the reserved slot in the procedure linkage table.  The
1426 	 * first entry is:
1427 	 *	0x00 PUSHQ	GOT+8(%rip)	    # GOT[1]
1428 	 *	0x06 JMP	*GOT+16(%rip)	    # GOT[2]
1429 	 *	0x0c NOP
1430 	 *	0x0d NOP
1431 	 *	0x0e NOP
1432 	 *	0x0f NOP
1433 	 */
1434 	if ((ofl->ofl_flags & FLG_OF_DYNAMIC) && ofl->ofl_osplt) {
1435 		uchar_t	*pltent;
1436 		Xword	val1;
1437 
1438 		pltent = (uchar_t *)ofl->ofl_osplt->os_outdata->d_buf;
1439 		bcopy(plt0_template, pltent, sizeof (plt0_template));
1440 
1441 		/*
1442 		 * If '-z noreloc' is specified - skip the do_reloc_ld
1443 		 * stage.
1444 		 */
1445 		if (!OFL_DO_RELOC(ofl))
1446 			return (1);
1447 
1448 		/*
1449 		 * fill in:
1450 		 *	PUSHQ GOT + 8(%rip)
1451 		 *
1452 		 * Note: 0x06 below represents the offset to the
1453 		 *	 next instruction - which is what %rip will
1454 		 *	 be pointing at.
1455 		 */
1456 		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
1457 		    (M_GOT_XLINKMAP * M_GOT_ENTSIZE) -
1458 		    ofl->ofl_osplt->os_shdr->sh_addr - 0x06;
1459 
1460 		if (do_reloc_ld(R_AMD64_GOTPCREL, &pltent[0x02],
1461 		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
1462 		    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
1463 			eprintf(ofl->ofl_lml, ERR_FATAL,
1464 			    MSG_INTL(MSG_PLT_PLT0FAIL));
1465 			return (S_ERROR);
1466 		}
1467 
1468 		/*
1469 		 * fill in:
1470 		 *  JMP	*GOT+16(%rip)
1471 		 */
1472 		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
1473 		    (M_GOT_XRTLD * M_GOT_ENTSIZE) -
1474 		    ofl->ofl_osplt->os_shdr->sh_addr - 0x0c;
1475 
1476 		if (do_reloc_ld(R_AMD64_GOTPCREL, &pltent[0x08],
1477 		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
1478 		    MSG_ORIG(MSG_SPECFIL_PLTENT), bswap, ofl->ofl_lml) == 0) {
1479 			eprintf(ofl->ofl_lml, ERR_FATAL,
1480 			    MSG_INTL(MSG_PLT_PLT0FAIL));
1481 			return (S_ERROR);
1482 		}
1483 	}
1484 
1485 	return (1);
1486 }
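
/*
 * For illustration (addresses assumed): with the .got at 0x402000, the
 * .plt at 0x401000, and taking M_GOT_XLINKMAP as 1 and M_GOT_XRTLD as 2
 * (the GOT[1] and GOT[2] slots named in the comment above), the first
 * patch stores 0x402000 + (1 * 8) - 0x401000 - 0x06 = 0x1002 at plt[0x02],
 * and the second stores 0x402000 + (2 * 8) - 0x401000 - 0x0c = 0x1004 at
 * plt[0x08].
 */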
1487 
1488 
1489 
1490 /*
1491  * Template for generating a "void (*)(void)" function.
1492  */
1493 static const uchar_t nullfunc_tmpl[] = {	/* amd64 */
1494 /* 0x00 */	0x55,				/* pushq  %rbp */
1495 /* 0x01 */	0x48, 0x8b, 0xec,		/* movq   %rsp,%rbp */
1496 /* 0x04 */	0x48, 0x8b, 0xe5,		/* movq   %rbp,%rsp */
1497 /* 0x07 */	0x5d,				/* popq   %rbp */
1498 /* 0x08 */	0xc3				/* ret */
1499 };
1500 
1501 
1502 /*
1503  * Function used to provide fill padding in SHF_EXECINSTR sections
1504  *
1505  * entry:
1506  *
1507  *	base - base address of section being filled
1508  *	off - starting offset for fill within memory referenced by base
1509  *	cnt - # bytes to be filled
1510  *
1511  * exit:
1512  *	The fill has been completed.
1513  */
1514 static void
1515 execfill(void *base, off_t off, size_t cnt)
1516 {
1517 	/*
1518 	 * 0x90 is an x86 NOP instruction in both the 32- and 64-bit worlds.
1519 	 * There are no alignment constraints.
1520 	 */
1521 	(void) memset(off + (char *)base, 0x90, cnt);
1522 }
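
/*
 * For example, a call such as execfill(buf, 0x40, 3) (arguments assumed)
 * writes three 0x90 bytes at buf + 0x40, so that padding between functions
 * decodes as NOPs rather than as the "add %al, (%rax)" instructions that
 * zero-fill would produce.
 */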
1523 
1524 
1525 /*
1526  * Return the ld_targ definition for this target.
1527  */
1528 const Target *
1529 ld_targ_init_x86(void)
1530 {
1531 	static const Target _ld_targ = {
1532 		{			/* Target_mach */
1533 			M_MACH,			/* m_mach */
1534 			M_MACHPLUS,		/* m_machplus */
1535 			M_FLAGSPLUS,		/* m_flagsplus */
1536 			M_CLASS,		/* m_class */
1537 			M_DATA,			/* m_data */
1538 
1539 			M_SEGM_ALIGN,		/* m_segm_align */
1540 			M_SEGM_ORIGIN,		/* m_segm_origin */
1541 			M_SEGM_AORIGIN,		/* m_segm_aorigin */
1542 			M_DATASEG_PERM,		/* m_dataseg_perm */
1543 			M_STACK_PERM,		/* m_stack_perm */
1544 			M_WORD_ALIGN,		/* m_word_align */
1545 			MSG_ORIG(MSG_PTH_RTLD_AMD64), /* m_def_interp */
1546 
1547 			/* Relocation type codes */
1548 			M_R_ARRAYADDR,		/* m_r_arrayaddr */
1549 			M_R_COPY,		/* m_r_copy */
1550 			M_R_GLOB_DAT,		/* m_r_glob_dat */
1551 			M_R_JMP_SLOT,		/* m_r_jmp_slot */
1552 			M_R_NUM,		/* m_r_num */
1553 			M_R_NONE,		/* m_r_none */
1554 			M_R_RELATIVE,		/* m_r_relative */
1555 			M_R_REGISTER,		/* m_r_register */
1556 
1557 			/* Relocation related constants */
1558 			M_REL_DT_COUNT,		/* m_rel_dt_count */
1559 			M_REL_DT_ENT,		/* m_rel_dt_ent */
1560 			M_REL_DT_SIZE,		/* m_rel_dt_size */
1561 			M_REL_DT_TYPE,		/* m_rel_dt_type */
1562 			M_REL_SHT_TYPE,		/* m_rel_sht_type */
1563 
1564 			/* GOT related constants */
1565 			M_GOT_ENTSIZE,		/* m_got_entsize */
1566 			M_GOT_XNumber,		/* m_got_xnumber */
1567 
1568 			/* PLT related constants */
1569 			M_PLT_ALIGN,		/* m_plt_align */
1570 			M_PLT_ENTSIZE,		/* m_plt_entsize */
1571 			M_PLT_RESERVSZ,		/* m_plt_reservsz */
1572 			M_PLT_SHF_FLAGS,	/* m_plt_shf_flags */
1573 
1574 			/* Section type of .eh_frame/.eh_frame_hdr sections */
1575 			SHT_AMD64_UNWIND,	/* m_sht_unwind */
1576 
1577 			M_DT_REGISTER,		/* m_dt_register */
1578 		},
1579 		{			/* Target_machid */
1580 			M_ID_ARRAY,		/* id_array */
1581 			M_ID_BSS,		/* id_bss */
1582 			M_ID_CAP,		/* id_cap */
1583 			M_ID_DATA,		/* id_data */
1584 			M_ID_DYNAMIC,		/* id_dynamic */
1585 			M_ID_DYNSORT,		/* id_dynsort */
1586 			M_ID_DYNSTR,		/* id_dynstr */
1587 			M_ID_DYNSYM,		/* id_dynsym */
1588 			M_ID_DYNSYM_NDX,	/* id_dynsym_ndx */
1589 			M_ID_GOT,		/* id_got */
1590 			M_ID_UNKNOWN,		/* id_gotdata (unused) */
1591 			M_ID_HASH,		/* id_hash */
1592 			M_ID_INTERP,		/* id_interp */
1593 			M_ID_LBSS,		/* id_lbss */
1594 			M_ID_LDYNSYM,		/* id_ldynsym */
1595 			M_ID_NOTE,		/* id_note */
1596 			M_ID_NULL,		/* id_null */
1597 			M_ID_PLT,		/* id_plt */
1598 			M_ID_REL,		/* id_rel */
1599 			M_ID_STRTAB,		/* id_strtab */
1600 			M_ID_SYMINFO,		/* id_syminfo */
1601 			M_ID_SYMTAB,		/* id_symtab */
1602 			M_ID_SYMTAB_NDX,	/* id_symtab_ndx */
1603 			M_ID_TEXT,		/* id_text */
1604 			M_ID_TLS,		/* id_tls */
1605 			M_ID_TLSBSS,		/* id_tlsbss */
1606 			M_ID_UNKNOWN,		/* id_unknown */
1607 			M_ID_UNWIND,		/* id_unwind */
1608 			M_ID_UNWINDHDR,		/* id_unwindhdr */
1609 			M_ID_USER,		/* id_user */
1610 			M_ID_VERSION,		/* id_version */
1611 		},
1612 		{			/* Target_nullfunc */
1613 			nullfunc_tmpl,		/* nf_template */
1614 			sizeof (nullfunc_tmpl),	/* nf_size */
1615 		},
1616 		{			/* Target_fillfunc */
1617 			execfill		/* ff_execfill */
1618 		},
1619 		{			/* Target_machrel */
1620 			reloc_table,
1621 
1622 			ld_init_rel,		/* mr_init_rel */
1623 			ld_mach_eflags,		/* mr_mach_eflags */
1624 			ld_mach_make_dynamic,	/* mr_mach_make_dynamic */
1625 			ld_mach_update_odynamic, /* mr_mach_update_odynamic */
1626 			ld_calc_plt_addr,	/* mr_calc_plt_addr */
1627 			ld_perform_outreloc,	/* mr_perform_outreloc */
1628 			ld_do_activerelocs,	/* mr_do_activerelocs */
1629 			ld_add_outrel,		/* mr_add_outrel */
1630 			NULL,			/* mr_reloc_register */
1631 			ld_reloc_local,		/* mr_reloc_local */
1632 			NULL,			/* mr_reloc_GOTOP */
1633 			ld_reloc_TLS,		/* mr_reloc_TLS */
1634 			NULL,			/* mr_assign_got */
1635 			ld_find_got_ndx,	/* mr_find_got_ndx */
1636 			ld_calc_got_offset,	/* mr_calc_got_offset */
1637 			ld_assign_got_ndx,	/* mr_assign_got_ndx */
1638 			ld_assign_plt_ndx,	/* mr_assign_plt_ndx */
1639 			NULL,			/* mr_allocate_got */
1640 			ld_fillin_gotplt,	/* mr_fillin_gotplt */
1641 		},
1642 		{			/* Target_machsym */
1643 			NULL,			/* ms_reg_check */
1644 			NULL,			/* ms_mach_sym_typecheck */
1645 			NULL,			/* ms_is_regsym */
1646 			NULL,			/* ms_reg_find */
1647 			NULL			/* ms_reg_enter */
1648 		}
1649 	};
1650 
1651 	return (&_ld_targ);
1652 }
1653