/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Dell EMC
 * Copyright (c) 2007 Sandvine Incorporated
 * Copyright (c) 1998 John D. Polstra
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/endian.h>
#include <sys/param.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/queue.h>
#include <sys/linker_set.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <machine/elf.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libutil.h>

#include "extern.h"

/*
 * Code for generating ELF core dumps.
 */
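
/*
 * The core file produced below consists of: an ELF header, a PT_NOTE
 * program header followed by one PT_LOAD program header per dumped mapping
 * (plus a single placeholder section header when the count overflows
 * PN_XNUM), the note data, and finally the segment contents, which start on
 * a page boundary.  See elf_coredump() and elf_puthdr() for the details.
 */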

struct map_entry {
	struct map_entry *next;
	vm_offset_t start;
	vm_offset_t end;
	vm_prot_t protection;
};

typedef void (*segment_callback)(struct map_entry *, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

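/*
 * When this file is built as the 32-bit compatibility dumper (with
 * ELFCORE_COMPAT_32 defined), emit the 32-bit prpsinfo layout so that the
 * NT_PRPSINFO note matches what is expected for a 32-bit process.
 */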
#ifdef ELFCORE_COMPAT_32
typedef struct prpsinfo32 elfcore_prpsinfo_t;
#else
typedef prpsinfo_t elfcore_prpsinfo_t;
#endif

typedef void* (*notefunc_t)(void *, size_t *);

static void cb_put_phdr(struct map_entry *, void *);
static void cb_size_segment(struct map_entry *, void *);
static void each_dumpable_segment(struct map_entry *, segment_callback,
    void *closure);
static void elf_detach(void);	/* atexit() handler. */
static void *elf_note_prpsinfo(void *, size_t *);
#if defined(__i386__) || defined(__amd64__)
static void *elf_note_x86_xstate(void *, size_t *);
#endif
#if defined(__powerpc__)
static void *elf_note_powerpc_vmx(void *, size_t *);
static void *elf_note_powerpc_vsx(void *, size_t *);
#endif
static void *elf_note_procstat_auxv(void *, size_t *);
static void *elf_note_procstat_files(void *, size_t *);
static void *elf_note_procstat_groups(void *, size_t *);
static void *elf_note_procstat_osrel(void *, size_t *);
static void *elf_note_procstat_proc(void *, size_t *);
static void *elf_note_procstat_psstrings(void *, size_t *);
static void *elf_note_procstat_rlimit(void *, size_t *);
static void *elf_note_procstat_umask(void *, size_t *);
static void *elf_note_procstat_vmmap(void *, size_t *);
static void elf_puthdr(int, pid_t, struct map_entry *, void *, size_t, size_t,
    size_t, int);
static void elf_putnote(int, notefunc_t, void *, struct sbuf *);
static void elf_putnotes(pid_t, struct sbuf *, size_t *);
static void elf_putregnote(int, lwpid_t, struct sbuf *);
static void freemap(struct map_entry *);
static struct map_entry *readmap(pid_t);
static void *procstat_sysctl(void *, int, size_t, size_t *sizep);

static pid_t g_pid;		/* Pid being dumped, global for elf_detach */
static int g_status;		/* proc status after ptrace attach */

static int
elf_ident(int efd, pid_t pid __unused, char *binfile __unused)
{
	Elf_Ehdr hdr;
	int cnt;
	uint16_t machine;

	cnt = read(efd, &hdr, sizeof(hdr));
	if (cnt != sizeof(hdr))
		return (0);
	if (!IS_ELF(hdr))
		return (0);
	switch (hdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		machine = le16toh(hdr.e_machine);
		break;
	case ELFDATA2MSB:
		machine = be16toh(hdr.e_machine);
		break;
	default:
		return (0);
	}
	if (!ELF_MACHINE_OK(machine))
		return (0);

	/* Looks good. */
	return (1);
}

static void
elf_detach(void)
{
	int sig;

	if (g_pid != 0) {
		/*
		 * Forward any pending signals. SIGSTOP is generated by ptrace
		 * itself, so ignore it.
		 */
		sig = WIFSTOPPED(g_status) ? WSTOPSIG(g_status) : 0;
		if (sig == SIGSTOP)
			sig = 0;
		ptrace(PT_DETACH, g_pid, (caddr_t)1, sig);
	}
}

/*
 * Write an ELF coredump for the given pid to the given fd.
 */
static void
elf_coredump(int efd, int fd, pid_t pid)
{
	struct map_entry *map;
	struct sseg_closure seginfo;
	struct sbuf *sb;
	void *hdr;
	size_t hdrsize, notesz, segoff;
	ssize_t n, old_len;
	Elf_Phdr *php;
	int i;

	/* Attach to process to dump. */
	g_pid = pid;
	if (atexit(elf_detach) != 0)
		err(1, "atexit");
	errno = 0;
	ptrace(PT_ATTACH, pid, NULL, 0);
	if (errno)
		err(1, "PT_ATTACH");
	if (waitpid(pid, &g_status, 0) == -1)
		err(1, "waitpid");

	/* Get the program's memory map. */
	map = readmap(pid);

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_dumpable_segment(map, cb_size_segment, &seginfo);

	/*
	 * Build the header and the notes using sbuf and write to the file.
	 */
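	/*
	 * The sbuf is assembled from nested sections: an empty subsection
	 * reserves hdrsize bytes for the ELF and program headers (filled in
	 * later by elf_puthdr()), the notes are appended after it, and the
	 * whole buffer is padded to a page boundary so that the segment data
	 * written afterwards starts page-aligned.
	 */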
	sb = sbuf_new_auto();
	hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
	if (seginfo.count + 1 >= PN_XNUM)
		hdrsize += sizeof(Elf_Shdr);
	/* Start header + notes section. */
	sbuf_start_section(sb, NULL);
	/* Make empty header subsection. */
	sbuf_start_section(sb, &old_len);
	sbuf_putc(sb, 0);
	sbuf_end_section(sb, old_len, hdrsize, 0);
	/* Put notes. */
	elf_putnotes(pid, sb, &notesz);
	/* Align up to a page boundary for the program segments. */
	sbuf_end_section(sb, -1, getpagesize(), 0);
	if (sbuf_finish(sb) != 0)
		err(1, "sbuf_finish");
	hdr = sbuf_data(sb);
	segoff = sbuf_len(sb);
	/* Fill in the header. */
	elf_puthdr(efd, pid, map, hdr, hdrsize, notesz, segoff, seginfo.count);

	n = write(fd, hdr, segoff);
	if (n == -1)
		err(1, "write");
	if (n < segoff)
		errx(1, "short write");

	/* Write the contents of all of the writable segments. */
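	/*
	 * The first program header describes the PT_NOTE segment; the "+ 1"
	 * below skips it so that php points at the first PT_LOAD entry
	 * filled in by cb_put_phdr().
	 */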
	php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
	for (i = 0; i < seginfo.count; i++) {
		struct ptrace_io_desc iorequest;
		uintmax_t nleft = php->p_filesz;

		iorequest.piod_op = PIOD_READ_D;
		iorequest.piod_offs = (caddr_t)(uintptr_t)php->p_vaddr;
		while (nleft > 0) {
			char buf[8*1024];
			size_t nwant;
			ssize_t ngot;

			if (nleft > sizeof(buf))
				nwant = sizeof(buf);
			else
				nwant = nleft;
			iorequest.piod_addr = buf;
			iorequest.piod_len = nwant;
			ptrace(PT_IO, pid, (caddr_t)&iorequest, 0);
			ngot = iorequest.piod_len;
			if ((size_t)ngot < nwant)
				errx(1, "short read wanted %zu, got %zd",
				    nwant, ngot);
			ngot = write(fd, buf, nwant);
			if (ngot == -1)
				err(1, "write of segment %d failed", i);
			if ((size_t)ngot != nwant)
				errx(1, "short write");
			nleft -= nwant;
			iorequest.piod_offs += ngot;
		}
		php++;
	}
	sbuf_delete(sb);
	freemap(map);
}

/*
 * A callback for each_dumpable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(struct map_entry *entry, void *closure)
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;
	size_t page_size;

	page_size = getpagesize();
	phc->offset = roundup2(phc->offset, page_size);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = page_size;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_dumpable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(struct map_entry *entry, void *closure)
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each segment in the given memory map, call the given function
 * with a pointer to the map entry and some arbitrary caller-supplied
 * data.
 */
static void
each_dumpable_segment(struct map_entry *map, segment_callback func,
    void *closure)
{
	struct map_entry *entry;

	for (entry = map; entry != NULL; entry = entry->next)
		(*func)(entry, closure);
}

static void
elf_putnotes(pid_t pid, struct sbuf *sb, size_t *sizep)
{
	lwpid_t *tids;
	size_t threads;
	ssize_t size, old_len;
	int i;

	errno = 0;
	threads = ptrace(PT_GETNUMLWPS, pid, NULL, 0);
	if (errno)
		err(1, "PT_GETNUMLWPS");
	tids = malloc(threads * sizeof(*tids));
	if (tids == NULL)
		errx(1, "out of memory");
	errno = 0;
	ptrace(PT_GETLWPLIST, pid, (void *)tids, threads);
	if (errno)
		err(1, "PT_GETLWPLIST");

	sbuf_start_section(sb, &old_len);
	elf_putnote(NT_PRPSINFO, elf_note_prpsinfo, &pid, sb);

	for (i = 0; i < threads; ++i) {
		elf_putregnote(NT_PRSTATUS, tids[i], sb);
		elf_putregnote(NT_FPREGSET, tids[i], sb);
		elf_putregnote(NT_THRMISC, tids[i], sb);
		elf_putregnote(NT_PTLWPINFO, tids[i], sb);
#if defined(__aarch64__) || defined(__arm__)
		elf_putregnote(NT_ARM_TLS, tids[i], sb);
#endif
#if (defined(ELFCORE_COMPAT_32) && defined(__aarch64__)) || defined(__arm__)
		elf_putregnote(NT_ARM_VFP, tids[i], sb);
#endif
#if defined(__i386__) || defined(__amd64__)
		elf_putregnote(NT_X86_SEGBASES, tids[i], sb);
		elf_putnote(NT_X86_XSTATE, elf_note_x86_xstate, tids + i, sb);
#endif
#if defined(__powerpc__)
		elf_putnote(NT_PPC_VMX, elf_note_powerpc_vmx, tids + i, sb);
#ifndef __SPE__
		elf_putnote(NT_PPC_VSX, elf_note_powerpc_vsx, tids + i, sb);
#endif
#endif
	}

#ifndef ELFCORE_COMPAT_32
	elf_putnote(NT_PROCSTAT_PROC, elf_note_procstat_proc, &pid, sb);
	elf_putnote(NT_PROCSTAT_FILES, elf_note_procstat_files, &pid, sb);
	elf_putnote(NT_PROCSTAT_VMMAP, elf_note_procstat_vmmap, &pid, sb);
	elf_putnote(NT_PROCSTAT_GROUPS, elf_note_procstat_groups, &pid, sb);
	elf_putnote(NT_PROCSTAT_UMASK, elf_note_procstat_umask, &pid, sb);
	elf_putnote(NT_PROCSTAT_RLIMIT, elf_note_procstat_rlimit, &pid, sb);
	elf_putnote(NT_PROCSTAT_OSREL, elf_note_procstat_osrel, &pid, sb);
	elf_putnote(NT_PROCSTAT_PSSTRINGS, elf_note_procstat_psstrings, &pid,
	    sb);
	elf_putnote(NT_PROCSTAT_AUXV, elf_note_procstat_auxv, &pid, sb);
#endif

	size = sbuf_end_section(sb, old_len, 1, 0);
	if (size == -1)
		err(1, "sbuf_end_section");
	free(tids);
	*sizep = size;
}

/*
 * Emit one register set note section to sbuf.
 */
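/*
 * Notes use the standard ELF layout: an Elf_Note header with n_namesz = 8
 * (the string "FreeBSD" plus its NUL terminator), followed by the name and
 * the descriptor, each padded to a 4-byte boundary.
 */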
static void
elf_putregnote(int type, lwpid_t tid, struct sbuf *sb)
{
	Elf_Note note;
	struct iovec iov;
	ssize_t old_len;

	iov.iov_base = NULL;
	iov.iov_len = 0;
	if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0)
		return;
	iov.iov_base = calloc(1, iov.iov_len);
	if (iov.iov_base == NULL)
		errx(1, "out of memory");
	if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0)
		errx(1, "failed to fetch register set %d", type);

	note.n_namesz = 8; /* strlen("FreeBSD") + 1 */
	note.n_descsz = iov.iov_len;
	note.n_type = type;

	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, "FreeBSD", note.n_namesz);
	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, iov.iov_base, iov.iov_len);
	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
	free(iov.iov_base);
}

/*
 * Emit one note section to sbuf.
 */
static void
elf_putnote(int type, notefunc_t notefunc, void *arg, struct sbuf *sb)
{
	Elf_Note note;
	size_t descsz;
	ssize_t old_len;
	void *desc;

	desc = notefunc(arg, &descsz);
	note.n_namesz = 8; /* strlen("FreeBSD") + 1 */
	note.n_descsz = descsz;
	note.n_type = type;

	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, "FreeBSD", note.n_namesz);
	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
	if (descsz == 0)
		return;
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, desc, descsz);
	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
	free(desc);
}

/*
 * Generate the ELF coredump header.
 */
static void
elf_puthdr(int efd, pid_t pid, struct map_entry *map, void *hdr, size_t hdrsize,
    size_t notesz, size_t segoff, int numsegs)
{
	Elf_Ehdr *ehdr, binhdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;
	ssize_t cnt;

	cnt = read(efd, &binhdr, sizeof(binhdr));
	if (cnt < 0)
		err(1, "Failed to re-read ELF header");
	else if (cnt != sizeof(binhdr))
		errx(1, "Failed to re-read ELF header");

	ehdr = (Elf_Ehdr *)hdr;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = binhdr.e_machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = binhdr.e_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;

		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = sizeof(Elf32_Size);
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = segoff;
	each_dumpable_segment(map, cb_put_phdr, &phc);
}

/*
 * Free the memory map.
 */
static void
freemap(struct map_entry *map)
{
	struct map_entry *next;

	while (map != NULL) {
		next = map->next;
		free(map);
		map = next;
	}
}

/*
 * Read the process's memory map using kinfo_getvmmap(), and return a list of
 * VM map entries.  Only readable segments that are eligible for inclusion in
 * a core dump are returned.  The map entries in the list aren't fully filled
 * in; only the items we need are present.
 */
static struct map_entry *
readmap(pid_t pid)
{
	struct map_entry *ent, **linkp, *map;
	struct kinfo_vmentry *vmentl, *kve;
	int i, nitems;

	vmentl = kinfo_getvmmap(pid, &nitems);
	if (vmentl == NULL)
		err(1, "cannot retrieve mappings for process %u", pid);

	map = NULL;
	linkp = &map;
	for (i = 0; i < nitems; i++) {
		kve = &vmentl[i];

		/*
		 * Ignore 'malformed' segments and ones representing memory
		 * mapped with MAP_NOCORE.
		 * If 'full' support is disabled, just dump the most
		 * meaningful data segments.
		 */
		if ((kve->kve_protection & KVME_PROT_READ) == 0 ||
		    (kve->kve_flags & KVME_FLAG_NOCOREDUMP) != 0 ||
		    kve->kve_type == KVME_TYPE_DEAD ||
		    kve->kve_type == KVME_TYPE_UNKNOWN ||
		    ((pflags & PFLAGS_FULL) == 0 &&
		    kve->kve_type != KVME_TYPE_DEFAULT &&
		    kve->kve_type != KVME_TYPE_VNODE &&
		    kve->kve_type != KVME_TYPE_SWAP &&
		    kve->kve_type != KVME_TYPE_PHYS))
			continue;

		ent = calloc(1, sizeof(*ent));
		if (ent == NULL)
			errx(1, "out of memory");
		ent->start = (vm_offset_t)kve->kve_start;
		ent->end = (vm_offset_t)kve->kve_end;
		ent->protection = VM_PROT_READ;
		if ((kve->kve_protection & KVME_PROT_WRITE) != 0)
			ent->protection |= VM_PROT_WRITE;
		if ((kve->kve_protection & KVME_PROT_EXEC) != 0)
			ent->protection |= VM_PROT_EXECUTE;

		*linkp = ent;
		linkp = &ent->next;
	}
	free(vmentl);
	return (map);
}

/*
 * Miscellaneous note emitter functions.
 */
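
/*
 * Each elf_note_* helper returns a heap-allocated buffer holding the note
 * payload and stores its size in *sizep; a size of zero means the note has
 * no payload.
 */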

static void *
elf_note_prpsinfo(void *arg, size_t *sizep)
{
	char *cp, *end;
	pid_t pid;
	elfcore_prpsinfo_t *psinfo;
	struct kinfo_proc kip;
	size_t len;
	int name[4];

	pid = *(pid_t *)arg;
	psinfo = calloc(1, sizeof(*psinfo));
	if (psinfo == NULL)
		errx(1, "out of memory");
	psinfo->pr_version = PRPSINFO_VERSION;
	psinfo->pr_psinfosz = sizeof(*psinfo);

	name[0] = CTL_KERN;
	name[1] = KERN_PROC;
	name[2] = KERN_PROC_PID;
	name[3] = pid;
	len = sizeof(kip);
	if (sysctl(name, 4, &kip, &len, NULL, 0) == -1)
		err(1, "kern.proc.pid.%u", pid);
	if (kip.ki_pid != pid)
		err(1, "kern.proc.pid.%u", pid);
	strlcpy(psinfo->pr_fname, kip.ki_comm, sizeof(psinfo->pr_fname));
	name[2] = KERN_PROC_ARGS;
	len = sizeof(psinfo->pr_psargs) - 1;
	if (sysctl(name, 4, psinfo->pr_psargs, &len, NULL, 0) == 0 && len > 0) {
		cp = psinfo->pr_psargs;
		end = cp + len - 1;
		for (;;) {
			cp = memchr(cp, '\0', end - cp);
			if (cp == NULL)
				break;
			*cp = ' ';
		}
	} else
		strlcpy(psinfo->pr_psargs, kip.ki_comm,
		    sizeof(psinfo->pr_psargs));
	psinfo->pr_pid = pid;

	*sizep = sizeof(*psinfo);
	return (psinfo);
}

#if defined(__i386__) || defined(__amd64__)
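/*
 * Dump the extended FPU state (XSAVE area) of a thread.  The kernel reports
 * the area's size and the XCR0 mask via PT_GETXSTATE_INFO; the mask is then
 * stored into the area at X86_XSTATE_XCR0_OFFSET, presumably so that
 * consumers of the note can tell which state components are present.
 */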
static void *
elf_note_x86_xstate(void *arg, size_t *sizep)
{
	lwpid_t tid;
	char *xstate;
	static bool xsave_checked = false;
	static struct ptrace_xstate_info info;

	tid = *(lwpid_t *)arg;
	if (!xsave_checked) {
		if (ptrace(PT_GETXSTATE_INFO, tid, (void *)&info,
		    sizeof(info)) != 0)
			info.xsave_len = 0;
		xsave_checked = true;
	}
	if (info.xsave_len == 0) {
		*sizep = 0;
		return (NULL);
	}
	xstate = calloc(1, info.xsave_len);
	ptrace(PT_GETXSTATE, tid, xstate, 0);
	*(uint64_t *)(xstate + X86_XSTATE_XCR0_OFFSET) = info.xsave_mask;
	*sizep = info.xsave_len;
	return (xstate);
}
#endif

#if defined(__powerpc__)
static void *
elf_note_powerpc_vmx(void *arg, size_t *sizep)
{
	lwpid_t tid;
	struct vmxreg *vmx;
	static bool has_vmx = true;
	struct vmxreg info;

	tid = *(lwpid_t *)arg;
	if (has_vmx) {
		if (ptrace(PT_GETVRREGS, tid, (void *)&info,
		    sizeof(info)) != 0)
			has_vmx = false;
	}
	if (!has_vmx) {
		*sizep = 0;
		return (NULL);
	}
	vmx = calloc(1, sizeof(*vmx));
	memcpy(vmx, &info, sizeof(*vmx));
	*sizep = sizeof(*vmx);
	return (vmx);
}

static void *
elf_note_powerpc_vsx(void *arg, size_t *sizep)
{
	lwpid_t tid;
	char *vshr_data;
	static bool has_vsx = true;
	uint64_t vshr[32];

	tid = *(lwpid_t *)arg;
	if (has_vsx) {
		if (ptrace(PT_GETVSRREGS, tid, (void *)vshr,
		    sizeof(vshr)) != 0)
			has_vsx = false;
	}
	if (!has_vsx) {
		*sizep = 0;
		return (NULL);
	}
	vshr_data = calloc(1, sizeof(vshr));
	memcpy(vshr_data, vshr, sizeof(vshr));
	*sizep = sizeof(vshr);
	return (vshr_data);
}
#endif

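/*
 * Fetch a kern.proc.<what>.<pid> sysctl and package it as a procstat note:
 * the payload starts with an int holding the structure size, followed by
 * the raw sysctl output.  The extra len * 4 / 3 slack presumably leaves
 * room for the data growing between the size query and the actual fetch.
 */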
static void *
procstat_sysctl(void *arg, int what, size_t structsz, size_t *sizep)
{
	size_t len;
	pid_t pid;
	int name[4], structsize;
	void *buf, *p;

	pid = *(pid_t *)arg;
	structsize = structsz;
	name[0] = CTL_KERN;
	name[1] = KERN_PROC;
	name[2] = what;
	name[3] = pid;
	len = 0;
	if (sysctl(name, 4, NULL, &len, NULL, 0) == -1)
		err(1, "kern.proc.%d.%u", what, pid);
	buf = calloc(1, sizeof(structsize) + len * 4 / 3);
	if (buf == NULL)
		errx(1, "out of memory");
	bcopy(&structsize, buf, sizeof(structsize));
	p = (char *)buf + sizeof(structsize);
	if (sysctl(name, 4, p, &len, NULL, 0) == -1)
		err(1, "kern.proc.%d.%u", what, pid);

	*sizep = sizeof(structsize) + len;
	return (buf);
}

static void *
elf_note_procstat_proc(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_PID | KERN_PROC_INC_THREAD,
	    sizeof(struct kinfo_proc), sizep));
}

static void *
elf_note_procstat_files(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_FILEDESC,
	    sizeof(struct kinfo_file), sizep));
}

static void *
elf_note_procstat_vmmap(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_VMMAP,
	    sizeof(struct kinfo_vmentry), sizep));
}

static void *
elf_note_procstat_groups(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_GROUPS, sizeof(gid_t), sizep));
}

static void *
elf_note_procstat_umask(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_UMASK, sizeof(u_short), sizep));
}

static void *
elf_note_procstat_osrel(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_OSREL, sizeof(int), sizep));
}

static void *
elf_note_procstat_psstrings(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_PS_STRINGS,
	    sizeof(vm_offset_t), sizep));
}

static void *
elf_note_procstat_auxv(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_AUXV,
	    sizeof(Elf_Auxinfo), sizep));
}

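/*
 * Unlike the other procstat notes, the resource limits are fetched one
 * resource at a time through kern.proc.rlimit.<pid>.<resource> and
 * concatenated after the leading structure-size field.
 */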
static void *
elf_note_procstat_rlimit(void *arg, size_t *sizep)
{
	pid_t pid;
	size_t len;
	int i, name[5], structsize;
	void *buf, *p;

	pid = *(pid_t *)arg;
	structsize = sizeof(struct rlimit) * RLIM_NLIMITS;
	buf = calloc(1, sizeof(structsize) + structsize);
	if (buf == NULL)
		errx(1, "out of memory");
	bcopy(&structsize, buf, sizeof(structsize));
	p = (char *)buf + sizeof(structsize);
	name[0] = CTL_KERN;
	name[1] = KERN_PROC;
	name[2] = KERN_PROC_RLIMIT;
	name[3] = pid;
	len = sizeof(struct rlimit);
	for (i = 0; i < RLIM_NLIMITS; i++) {
		name[4] = i;
		if (sysctl(name, 5, p, &len, NULL, 0) == -1)
			err(1, "kern.proc.rlimit.%u", pid);
		if (len != sizeof(struct rlimit))
			errx(1, "kern.proc.rlimit.%u: short read", pid);
		p += len;
	}

	*sizep = sizeof(structsize) + structsize;
	return (buf);
}

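/*
 * Register this dumper in the "dumpset" linker set so that the gcore driver
 * can find it, probe binaries with elf_ident(), and invoke elf_coredump().
 */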
struct dumpers __elfN(dump) = { elf_ident, elf_coredump };
TEXT_SET(dumpset, __elfN(dump));