xref: /titanic_50/usr/src/uts/intel/amd64/krtld/kobj_boot.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Bootstrap the linker/loader.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/bootconf.h>
35 #include <sys/link.h>
36 #include <sys/auxv.h>
37 #include <sys/kobj.h>
38 #include <sys/elf.h>
39 #include <sys/bootsvcs.h>
40 #include <sys/kobj_impl.h>
41 
#if !defined(__GNUC__)

/*
 * We don't use the global offset table, but
 * ld may throw in an UNDEFINED reference in
 * our symbol table.
 */
#if !defined(_KERNEL)
#pragma weak _GLOBAL_OFFSET_TABLE_
#endif

#else

/*
 * We -do- use the global offset table, but only by
 * accident -- when you tell gcc to emit PIC code,
 * it -always- generates a reference to the GOT in
 * a register, even if the compilation unit never
 * uses it.
 *
 * Rumoured to be fixed in a later version of gcc..
 */

/* Dummy definition so gcc's stray GOT reference resolves at link time. */
long	_GLOBAL_OFFSET_TABLE_[1];

#endif
68 
/* Local alias; ALIGN presumably comes in via the kobj headers -- confirm. */
#define	roundup		ALIGN

#define	MAXSECT		64	/* max # of sects. (bounds the stack cache) */

/*
 * Upper 32 bits of a 64-bit address.  Used below to validate the
 * sign-extension rules for R_AMD64_32 (must be clear) and
 * R_AMD64_32S (must all be set) relocation values.
 */
#define	HIBITS		0xffffffff80000000	/* upper 32 bits */
74 
75 /*
76  * Boot transfers control here. At this point,
77  * we haven't relocated our own symbols, so the
78  * world (as we know it) is pretty small right now.
79  */
80 void
81 _kobj_boot(
82 	struct boot_syscalls *syscallp,
83 	void *dvec,
84 	struct bootops *bootops,
85 	Boot *ebp)
86 {
87 	Shdr *section[MAXSECT];	/* cache */
88 	val_t bootaux[BA_NUM];
89 	struct bootops *bop;
90 	Phdr *phdr;
91 	auxv_t *auxv = NULL;
92 	Shdr *sh;
93 	Half sh_num;
94 	ulong_t end, edata = 0;
95 	int i;
96 
97 	bop = (dvec) ? *(struct bootops **)bootops : bootops;
98 
99 	for (i = 0; i < BA_NUM; i++)
100 		bootaux[i].ba_val = NULL;
101 
102 	/*
103 	 * Check the bootstrap vector.
104 	 */
105 	for (; ebp->eb_tag != EB_NULL; ebp++) {
106 		switch (ebp->eb_tag) {
107 #if defined(__GNUC__)
108 		/*
109 		 * gcc 2.95, 3.1 cannot be told to not generate GOT references,
110 		 * which krtld cannot handle.  yet switch statements which
111 		 * can be mapped to jump tables are a frequent generator
112 		 * of such references.
113 		 */
114 		case 0x12345678:
115 			/*
116 			 * deliberately mess up the compilers
117 			 * temptation to create a jump table
118 			 */
119 			break;
120 #endif
121 		case EB_AUXV:
122 			auxv = (auxv_t *)ebp->eb_un.eb_ptr;
123 			break;
124 		case EB_DYNAMIC:
125 			bootaux[BA_DYNAMIC].ba_ptr = (void *)ebp->eb_un.eb_ptr;
126 			break;
127 		default:
128 			break;
129 		}
130 	}
131 
132 	if (auxv == NULL)
133 		return;
134 
135 	/*
136 	 * Now the aux vector.
137 	 */
138 	for (; auxv->a_type != AT_NULL; auxv++) {
139 		switch (auxv->a_type) {
140 #if defined(__GNUC__)
141 		case 0x12345678:
142 			/*
143 			 * deliberately mess up the compilers
144 			 * temptation to create a jump table
145 			 */
146 			break;
147 #endif
148 		case AT_PHDR:
149 			bootaux[BA_PHDR].ba_ptr = auxv->a_un.a_ptr;
150 			break;
151 		case AT_PHENT:
152 			bootaux[BA_PHENT].ba_val = auxv->a_un.a_val;
153 			break;
154 		case AT_PHNUM:
155 			bootaux[BA_PHNUM].ba_val = auxv->a_un.a_val;
156 			break;
157 		case AT_PAGESZ:
158 			bootaux[BA_PAGESZ].ba_val = auxv->a_un.a_val;
159 			break;
160 		case AT_SUN_LDELF:
161 			bootaux[BA_LDELF].ba_ptr = auxv->a_un.a_ptr;
162 			break;
163 		case AT_SUN_LDSHDR:
164 			bootaux[BA_LDSHDR].ba_ptr = auxv->a_un.a_ptr;
165 			break;
166 		case AT_SUN_LDNAME:
167 			bootaux[BA_LDNAME].ba_ptr = auxv->a_un.a_ptr;
168 			break;
169 		case AT_SUN_LPAGESZ:
170 			bootaux[BA_LPAGESZ].ba_val = auxv->a_un.a_val;
171 			break;
172 		case AT_SUN_CPU:
173 			bootaux[BA_CPU].ba_ptr = auxv->a_un.a_ptr;
174 			break;
175 		case AT_SUN_MMU:
176 			bootaux[BA_MMU].ba_ptr = auxv->a_un.a_ptr;
177 			break;
178 		case AT_ENTRY:
179 			bootaux[BA_ENTRY].ba_ptr = auxv->a_un.a_ptr;
180 			break;
181 		default:
182 			break;
183 		}
184 	}
185 
186 
187 	sh = (Shdr *)bootaux[BA_LDSHDR].ba_ptr;
188 	sh_num = ((Ehdr *)bootaux[BA_LDELF].ba_ptr)->e_shnum;
189 	/*
190 	 * Make sure we won't overflow stack allocated cache
191 	 */
192 	if (sh_num >= MAXSECT)
193 		return;
194 
195 	/*
196 	 * Build cache table for section addresses.
197 	 */
198 	for (i = 0; i < sh_num; i++) {
199 		section[i] = sh++;
200 	}
201 
202 	/*
203 	 * Find the end of data
204 	 * (to allocate bss)
205 	 */
206 	phdr = (Phdr *)bootaux[BA_PHDR].ba_ptr;
207 
208 	for (i = 0; i < bootaux[BA_PHNUM].ba_val; i++) {
209 		if (phdr->p_type == PT_LOAD &&
210 		    (phdr->p_flags & PF_W) && (phdr->p_flags & PF_X)) {
211 			edata = end = phdr->p_vaddr + phdr->p_memsz;
212 			break;
213 		}
214 		phdr = (Phdr *)((ulong_t)phdr + bootaux[BA_PHENT].ba_val);
215 	}
216 	if (edata == NULL)
217 		return;
218 
219 	/*
220 	 * Find the symbol table, and then loop
221 	 * through the symbols adjusting their
222 	 * values to reflect where the sections
223 	 * were loaded.
224 	 */
225 	for (i = 1; i < sh_num; i++) {
226 		Shdr *shp;
227 		Sym *sp;
228 		ulong_t off;
229 
230 		shp = section[i];
231 		if (shp->sh_type != SHT_SYMTAB)
232 			continue;
233 
234 		for (off = 0; off < shp->sh_size; off += shp->sh_entsize) {
235 			sp = (Sym *)(shp->sh_addr + off);
236 
237 			if (sp->st_shndx == SHN_ABS ||
238 			    sp->st_shndx == SHN_UNDEF)
239 				continue;
240 
241 			/*
242 			 * Assign the addresses for COMMON
243 			 * symbols even though we haven't
244 			 * actually allocated bss yet.
245 			 */
246 			if (sp->st_shndx == SHN_COMMON) {
247 				end = ALIGN(end, sp->st_value);
248 				sp->st_value = end;
249 				/*
250 				 * Squirrel it away for later.
251 				 */
252 				if (bootaux[BA_BSS].ba_val == 0)
253 					bootaux[BA_BSS].ba_val = end;
254 				end += sp->st_size;
255 				continue;
256 			} else if (sp->st_shndx > (Half)sh_num) {
257 				BSVC_PUTCHAR(syscallp, '>');
258 				return;
259 			}
260 
261 			/*
262 			 * Symbol's new address.
263 			 */
264 			sp->st_value += section[sp->st_shndx]->sh_addr;
265 		}
266 	}
267 
268 	/*
269 	 * Allocate bss for COMMON, if any.
270 	 */
271 	if (end > edata) {
272 		unsigned long va, bva;
273 		unsigned long asize;
274 		unsigned long align;
275 
276 		if (bootaux[BA_LPAGESZ].ba_val) {
277 			asize = bootaux[BA_LPAGESZ].ba_val;
278 			align = bootaux[BA_LPAGESZ].ba_val;
279 		} else {
280 			asize = bootaux[BA_PAGESZ].ba_val;
281 			align = BO_NO_ALIGN;
282 		}
283 		va = roundup(edata, asize);
284 		bva = roundup(end, asize);
285 
286 		if (bva > va) {
287 			bva = (unsigned long)BOP_ALLOC(bop, (caddr_t)va,
288 				bva - va, align);
289 			if (bva == NULL)
290 				return;
291 		}
292 		/*
293 		 * Zero it.
294 		 */
295 		for (va = edata; va < end; va++)
296 			*(char *)va = 0;
297 		/*
298 		 * Update the size of data.
299 		 */
300 		phdr->p_memsz += (end - edata);
301 	}
302 
303 	/*
304 	 * Relocate our own symbols.  We'll handle the
305 	 * undefined symbols later.
306 	 */
307 	for (i = 1; i < sh_num; i++) {
308 		Shdr *rshp, *shp, *ssp;
309 		unsigned long baseaddr, reladdr, rend;
310 		long relocsize;
311 
312 		rshp = section[i];
313 
314 		if (rshp->sh_type != SHT_RELA)
315 			continue;
316 		/*
317 		 * Get the section being relocated
318 		 * and the symbol table.
319 		 */
320 		shp = section[rshp->sh_info];
321 		ssp = section[rshp->sh_link];
322 
323 		/*
324 		 * Only perform relocations against allocatable
325 		 * sections.
326 		 */
327 		if ((shp->sh_flags & SHF_ALLOC) == 0)
328 			continue;
329 
330 		reladdr = rshp->sh_addr;
331 		baseaddr = shp->sh_addr;
332 		rend = reladdr + rshp->sh_size;
333 		relocsize = rshp->sh_entsize;
334 		/*
335 		 * Loop through relocations.
336 		 */
337 
338 		while (reladdr < rend) {
339 			Sym *symref;
340 			Rela *reloc;
341 			unsigned long stndx;
342 			unsigned long off, *offptr;
343 			long addend, value;
344 			unsigned long symoff, symsize;
345 			int rtype;
346 
347 			reloc = (Rela *)reladdr;
348 			off = reloc->r_offset;
349 			addend = (long)reloc->r_addend;
350 			rtype = ELF_R_TYPE(reloc->r_info);
351 			stndx = ELF_R_SYM(reloc->r_info);
352 
353 			reladdr += relocsize;
354 
355 			if (rtype == R_AMD64_NONE)
356 				continue;
357 
358 			off += baseaddr;
359 
360 			symsize = ssp->sh_entsize;
361 			symoff = stndx * symsize;
362 
363 			/*
364 			 * Check for bad symbol index.
365 			 */
366 			if (symoff > ssp->sh_size)
367 				return;
368 
369 			symref = (Sym *)(ssp->sh_addr + symoff);
370 
371 
372 			/*
373 			 * Just bind our own symbols at this point.
374 			 */
375 			if (symref->st_shndx == SHN_UNDEF)
376 				continue;
377 
378 			value = symref->st_value;
379 
380 			if ((rtype == R_AMD64_PC32) ||
381 			    (rtype == R_AMD64_PLT32))
382 				/*
383 				 * If PC-relative, subtract ref addr.
384 				 */
385 				value -= off;
386 			else if (rtype == R_AMD64_32) {
387 				/*
388 				 * It's illegal to have any HIBITS
389 				 * set for R_AMD64_32 reloc.
390 				 */
391 				if (value & HIBITS) {
392 					BSVC_PUTCHAR(syscallp, 'h');
393 					return;
394 				}
395 			} else if (rtype == R_AMD64_32S) {
396 				/*
397 				 * All HIBITS for R_AMD64_32S
398 				 * *must* be set.
399 				 */
400 				if ((value & HIBITS) != HIBITS) {
401 					BSVC_PUTCHAR(syscallp, 'H');
402 					return;
403 				}
404 			}
405 
406 			offptr = (unsigned long *)off;
407 			/*
408 			 * insert value calculated at reference point
409 			 * 2 cases - normal byte order aligned, normal byte
410 			 * order unaligned.
411 			 */
412 			switch (rtype) {
413 #if defined(__GNUC__)
414 			case 0x12345678:
415 				/*
416 				 * deliberately mess up the compilers
417 				 * temptation to create a jump table
418 				 */
419 				break;
420 #endif
421 			case R_AMD64_64:
422 				*(unsigned long *)offptr = value + addend;
423 				break;
424 			case R_AMD64_PC32:
425 			case R_AMD64_32S:
426 			case R_AMD64_PLT32:
427 				*(uint_t *)offptr = (uint_t)(value + addend);
428 				break;
429 			case R_AMD64_GOT32:
430 				BSVC_PUTCHAR(syscallp, 'G');
431 				return;
432 			case R_AMD64_32:
433 				return;
434 			default:
435 				BSVC_PUTCHAR(syscallp, 'R');
436 				return;
437 			}
438 			/*
439 			 * We only need to do it once.
440 			 */
441 			reloc->r_info = ELF_R_INFO(stndx, R_AMD64_NONE);
442 		} /* while */
443 	}
444 
445 	/*
446 	 * Done relocating all of our *defined*
447 	 * symbols, so we hand off.
448 	 */
449 	kobj_init(syscallp, dvec, bootops, bootaux);
450 }
451