xref: /freebsd/usr.bin/gcore/elfcore.c (revision 29fc4075e69fd27de0cded313ac6000165d99f8b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2017 Dell EMC
5  * Copyright (c) 2007 Sandvine Incorporated
6  * Copyright (c) 1998 John D. Polstra
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/endian.h>
35 #include <sys/param.h>
36 #include <sys/procfs.h>
37 #include <sys/ptrace.h>
38 #include <sys/queue.h>
39 #include <sys/linker_set.h>
40 #include <sys/sbuf.h>
41 #include <sys/sysctl.h>
42 #include <sys/user.h>
43 #include <sys/wait.h>
44 #include <machine/elf.h>
45 #include <vm/vm_param.h>
46 #include <vm/vm.h>
47 #include <assert.h>
48 #include <err.h>
49 #include <errno.h>
50 #include <fcntl.h>
51 #include <stdbool.h>
52 #include <stdint.h>
53 #include <stdio.h>
54 #include <stdlib.h>
55 #include <string.h>
56 #include <unistd.h>
57 #include <libutil.h>
58 
59 #include "extern.h"
60 
61 /*
62  * Code for generating ELF core dumps.
63  */
64 
/*
 * One entry of the target process's memory map, as built by readmap().
 * Only the fields needed for core generation are filled in.
 */
struct map_entry {
	struct map_entry *next;	/* Singly-linked list link. */
	vm_offset_t start;	/* Start address of the mapping. */
	vm_offset_t end;	/* End address (exclusive). */
	vm_prot_t protection;	/* VM_PROT_* access bits. */
};

/* Per-entry callback invoked by each_dumpable_segment(). */
typedef void (*segment_callback)(struct map_entry *, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

/*
 * For 32-bit compatibility dumps use the 32-bit psinfo layout so the
 * note matches what a 32-bit consumer expects.
 */
#ifdef ELFCORE_COMPAT_32
typedef struct prpsinfo32 elfcore_prpsinfo_t;
#else
typedef prpsinfo_t elfcore_prpsinfo_t;
#endif

/*
 * A note generator: returns an allocated descriptor (ownership passes
 * to the caller, freed by elf_putnote()) and stores its size via the
 * size_t pointer.
 */
typedef void* (*notefunc_t)(void *, size_t *);
93 
94 static void cb_put_phdr(struct map_entry *, void *);
95 static void cb_size_segment(struct map_entry *, void *);
96 static void each_dumpable_segment(struct map_entry *, segment_callback,
97     void *closure);
98 static void elf_detach(void);	/* atexit() handler. */
99 static void *elf_note_prpsinfo(void *, size_t *);
100 #if defined(__i386__) || defined(__amd64__)
101 static void *elf_note_x86_xstate(void *, size_t *);
102 #endif
103 #if defined(__powerpc__)
104 static void *elf_note_powerpc_vmx(void *, size_t *);
105 static void *elf_note_powerpc_vsx(void *, size_t *);
106 #endif
107 static void *elf_note_procstat_auxv(void *, size_t *);
108 static void *elf_note_procstat_files(void *, size_t *);
109 static void *elf_note_procstat_groups(void *, size_t *);
110 static void *elf_note_procstat_osrel(void *, size_t *);
111 static void *elf_note_procstat_proc(void *, size_t *);
112 static void *elf_note_procstat_psstrings(void *, size_t *);
113 static void *elf_note_procstat_rlimit(void *, size_t *);
114 static void *elf_note_procstat_umask(void *, size_t *);
115 static void *elf_note_procstat_vmmap(void *, size_t *);
116 static void elf_puthdr(int, pid_t, struct map_entry *, void *, size_t, size_t,
117     size_t, int);
118 static void elf_putnote(int, notefunc_t, void *, struct sbuf *);
119 static void elf_putnotes(pid_t, struct sbuf *, size_t *);
120 static void elf_putregnote(int, lwpid_t, struct sbuf *);
121 static void freemap(struct map_entry *);
122 static struct map_entry *readmap(pid_t);
123 static void *procstat_sysctl(void *, int, size_t, size_t *sizep);
124 
static pid_t g_pid;		/* Pid being dumped; global so the elf_detach() atexit handler can reach it. */
static int g_status;		/* Process status from waitpid() after PT_ATTACH; examined on detach. */
127 
128 static int
129 elf_ident(int efd, pid_t pid __unused, char *binfile __unused)
130 {
131 	Elf_Ehdr hdr;
132 	int cnt;
133 	uint16_t machine;
134 
135 	cnt = read(efd, &hdr, sizeof(hdr));
136 	if (cnt != sizeof(hdr))
137 		return (0);
138 	if (!IS_ELF(hdr))
139 		return (0);
140 	switch (hdr.e_ident[EI_DATA]) {
141 	case ELFDATA2LSB:
142 		machine = le16toh(hdr.e_machine);
143 		break;
144 	case ELFDATA2MSB:
145 		machine = be16toh(hdr.e_machine);
146 		break;
147 	default:
148 		return (0);
149 	}
150 	if (!ELF_MACHINE_OK(machine))
151 		return (0);
152 
153 	/* Looks good. */
154 	return (1);
155 }
156 
157 static void
158 elf_detach(void)
159 {
160 	int sig;
161 
162 	if (g_pid != 0) {
163 		/*
164 		 * Forward any pending signals. SIGSTOP is generated by ptrace
165 		 * itself, so ignore it.
166 		 */
167 		sig = WIFSTOPPED(g_status) ? WSTOPSIG(g_status) : 0;
168 		if (sig == SIGSTOP)
169 			sig = 0;
170 		ptrace(PT_DETACH, g_pid, (caddr_t)1, sig);
171 	}
172 }
173 
174 /*
175  * Write an ELF coredump for the given pid to the given fd.
176  */
177 static void
178 elf_coredump(int efd, int fd, pid_t pid)
179 {
180 	struct map_entry *map;
181 	struct sseg_closure seginfo;
182 	struct sbuf *sb;
183 	void *hdr;
184 	size_t hdrsize, notesz, segoff;
185 	ssize_t n, old_len;
186 	Elf_Phdr *php;
187 	int i;
188 
189 	/* Attach to process to dump. */
190 	g_pid = pid;
191 	if (atexit(elf_detach) != 0)
192 		err(1, "atexit");
193 	errno = 0;
194 	ptrace(PT_ATTACH, pid, NULL, 0);
195 	if (errno)
196 		err(1, "PT_ATTACH");
197 	if (waitpid(pid, &g_status, 0) == -1)
198 		err(1, "waitpid");
199 
200 	/* Get the program's memory map. */
201 	map = readmap(pid);
202 
203 	/* Size the program segments. */
204 	seginfo.count = 0;
205 	seginfo.size = 0;
206 	each_dumpable_segment(map, cb_size_segment, &seginfo);
207 
208 	/*
209 	 * Build the header and the notes using sbuf and write to the file.
210 	 */
211 	sb = sbuf_new_auto();
212 	hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
213 	if (seginfo.count + 1 >= PN_XNUM)
214 		hdrsize += sizeof(Elf_Shdr);
215 	/* Start header + notes section. */
216 	sbuf_start_section(sb, NULL);
217 	/* Make empty header subsection. */
218 	sbuf_start_section(sb, &old_len);
219 	sbuf_putc(sb, 0);
220 	sbuf_end_section(sb, old_len, hdrsize, 0);
221 	/* Put notes. */
222 	elf_putnotes(pid, sb, &notesz);
223 	/* Align up to a page boundary for the program segments. */
224 	sbuf_end_section(sb, -1, getpagesize(), 0);
225 	if (sbuf_finish(sb) != 0)
226 		err(1, "sbuf_finish");
227 	hdr = sbuf_data(sb);
228 	segoff = sbuf_len(sb);
229 	/* Fill in the header. */
230 	elf_puthdr(efd, pid, map, hdr, hdrsize, notesz, segoff, seginfo.count);
231 
232 	n = write(fd, hdr, segoff);
233 	if (n == -1)
234 		err(1, "write");
235 	if (n < segoff)
236               errx(1, "short write");
237 
238 	/* Write the contents of all of the writable segments. */
239 	php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
240 	for (i = 0;  i < seginfo.count;  i++) {
241 		struct ptrace_io_desc iorequest;
242 		uintmax_t nleft = php->p_filesz;
243 
244 		iorequest.piod_op = PIOD_READ_D;
245 		iorequest.piod_offs = (caddr_t)(uintptr_t)php->p_vaddr;
246 		while (nleft > 0) {
247 			char buf[8*1024];
248 			size_t nwant;
249 			ssize_t ngot;
250 
251 			if (nleft > sizeof(buf))
252 				nwant = sizeof buf;
253 			else
254 				nwant = nleft;
255 			iorequest.piod_addr = buf;
256 			iorequest.piod_len = nwant;
257 			ptrace(PT_IO, pid, (caddr_t)&iorequest, 0);
258 			ngot = iorequest.piod_len;
259 			if ((size_t)ngot < nwant)
260 				errx(1, "short read wanted %zu, got %zd",
261 				    nwant, ngot);
262 			ngot = write(fd, buf, nwant);
263 			if (ngot == -1)
264 				err(1, "write of segment %d failed", i);
265 			if ((size_t)ngot != nwant)
266 				errx(1, "short write");
267 			nleft -= nwant;
268 			iorequest.piod_offs += ngot;
269 		}
270 		php++;
271 	}
272 	sbuf_delete(sb);
273 	freemap(map);
274 }
275 
276 /*
277  * A callback for each_dumpable_segment() to write out the segment's
278  * program header entry.
279  */
280 static void
281 cb_put_phdr(struct map_entry *entry, void *closure)
282 {
283 	struct phdr_closure *phc = (struct phdr_closure *)closure;
284 	Elf_Phdr *phdr = phc->phdr;
285 	size_t page_size;
286 
287 	page_size = getpagesize();
288 	phc->offset = roundup2(phc->offset, page_size);
289 
290 	phdr->p_type = PT_LOAD;
291 	phdr->p_offset = phc->offset;
292 	phdr->p_vaddr = entry->start;
293 	phdr->p_paddr = 0;
294 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
295 	phdr->p_align = page_size;
296 	phdr->p_flags = 0;
297 	if (entry->protection & VM_PROT_READ)
298 		phdr->p_flags |= PF_R;
299 	if (entry->protection & VM_PROT_WRITE)
300 		phdr->p_flags |= PF_W;
301 	if (entry->protection & VM_PROT_EXECUTE)
302 		phdr->p_flags |= PF_X;
303 
304 	phc->offset += phdr->p_filesz;
305 	phc->phdr++;
306 }
307 
308 /*
309  * A callback for each_dumpable_segment() to gather information about
310  * the number of segments and their total size.
311  */
312 static void
313 cb_size_segment(struct map_entry *entry, void *closure)
314 {
315 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
316 
317 	ssc->count++;
318 	ssc->size += entry->end - entry->start;
319 }
320 
321 /*
322  * For each segment in the given memory map, call the given function
323  * with a pointer to the map entry and some arbitrary caller-supplied
324  * data.
325  */
326 static void
327 each_dumpable_segment(struct map_entry *map, segment_callback func,
328     void *closure)
329 {
330 	struct map_entry *entry;
331 
332 	for (entry = map; entry != NULL; entry = entry->next)
333 		(*func)(entry, closure);
334 }
335 
336 static void
337 elf_putnotes(pid_t pid, struct sbuf *sb, size_t *sizep)
338 {
339 	lwpid_t *tids;
340 	size_t threads, old_len;
341 	ssize_t size;
342 	int i;
343 
344 	errno = 0;
345 	threads = ptrace(PT_GETNUMLWPS, pid, NULL, 0);
346 	if (errno)
347 		err(1, "PT_GETNUMLWPS");
348 	tids = malloc(threads * sizeof(*tids));
349 	if (tids == NULL)
350 		errx(1, "out of memory");
351 	errno = 0;
352 	ptrace(PT_GETLWPLIST, pid, (void *)tids, threads);
353 	if (errno)
354 		err(1, "PT_GETLWPLIST");
355 
356 	sbuf_start_section(sb, &old_len);
357 	elf_putnote(NT_PRPSINFO, elf_note_prpsinfo, &pid, sb);
358 
359 	for (i = 0; i < threads; ++i) {
360 		elf_putregnote(NT_PRSTATUS, tids[i], sb);
361 		elf_putregnote(NT_FPREGSET, tids[i], sb);
362 		elf_putregnote(NT_THRMISC, tids[i], sb);
363 		elf_putregnote(NT_PTLWPINFO, tids[i], sb);
364 #if defined(__aarch64__) || defined(__arm__)
365 		elf_putregnote(NT_ARM_TLS, tids[i], sb);
366 #endif
367 #if (defined(ELFCORE_COMPAT_32) && defined(__aarch64__)) || defined(__arm__)
368 		elf_putregnote(NT_ARM_VFP, tids[i], sb);
369 #endif
370 #if defined(__i386__) || defined(__amd64__)
371 		elf_putregnote(NT_X86_SEGBASES, tids[i], sb);
372 		elf_putnote(NT_X86_XSTATE, elf_note_x86_xstate, tids + i, sb);
373 #endif
374 #if defined(__powerpc__)
375 		elf_putnote(NT_PPC_VMX, elf_note_powerpc_vmx, tids + i, sb);
376 #ifndef __SPE__
377 		elf_putnote(NT_PPC_VSX, elf_note_powerpc_vsx, tids + i, sb);
378 #endif
379 #endif
380 	}
381 
382 #ifndef ELFCORE_COMPAT_32
383 	elf_putnote(NT_PROCSTAT_PROC, elf_note_procstat_proc, &pid, sb);
384 	elf_putnote(NT_PROCSTAT_FILES, elf_note_procstat_files, &pid, sb);
385 	elf_putnote(NT_PROCSTAT_VMMAP, elf_note_procstat_vmmap, &pid, sb);
386 	elf_putnote(NT_PROCSTAT_GROUPS, elf_note_procstat_groups, &pid, sb);
387 	elf_putnote(NT_PROCSTAT_UMASK, elf_note_procstat_umask, &pid, sb);
388 	elf_putnote(NT_PROCSTAT_RLIMIT, elf_note_procstat_rlimit, &pid, sb);
389 	elf_putnote(NT_PROCSTAT_OSREL, elf_note_procstat_osrel, &pid, sb);
390 	elf_putnote(NT_PROCSTAT_PSSTRINGS, elf_note_procstat_psstrings, &pid,
391 	    sb);
392 	elf_putnote(NT_PROCSTAT_AUXV, elf_note_procstat_auxv, &pid, sb);
393 #endif
394 
395 	size = sbuf_end_section(sb, old_len, 1, 0);
396 	if (size == -1)
397 		err(1, "sbuf_end_section");
398 	free(tids);
399 	*sizep = size;
400 }
401 
/*
 * Emit one register set note section to sbuf.
 *
 * The register set is fetched with PT_GETREGSET twice: once with a
 * NULL buffer to learn the size, then again into an allocated buffer.
 * A thread/type combination the kernel rejects on the first call is
 * silently skipped (not every architecture supports every set).
 */
static void
elf_putregnote(int type, lwpid_t tid, struct sbuf *sb)
{
	Elf_Note note;
	struct iovec iov;
	ssize_t old_len;

	/* Probe for the size of this register set. */
	iov.iov_base = NULL;
	iov.iov_len = 0;
	if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0)
		return;
	iov.iov_base = calloc(1, iov.iov_len);
	if (iov.iov_base == NULL)
		errx(1, "out of memory");
	if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0)
		errx(1, "failed to fetch register set %d", type);

	note.n_namesz = 8; /* strlen("FreeBSD") + 1 */
	note.n_descsz = iov.iov_len;
	note.n_type = type;

	/* Note name and descriptor are each padded to 4-byte alignment. */
	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, "FreeBSD", note.n_namesz);
	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, iov.iov_base, iov.iov_len);
	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
	free(iov.iov_base);
}
435 
436 /*
437  * Emit one note section to sbuf.
438  */
439 static void
440 elf_putnote(int type, notefunc_t notefunc, void *arg, struct sbuf *sb)
441 {
442 	Elf_Note note;
443 	size_t descsz;
444 	ssize_t old_len;
445 	void *desc;
446 
447 	desc = notefunc(arg, &descsz);
448 	note.n_namesz = 8; /* strlen("FreeBSD") + 1 */
449 	note.n_descsz = descsz;
450 	note.n_type = type;
451 
452 	sbuf_bcat(sb, &note, sizeof(note));
453 	sbuf_start_section(sb, &old_len);
454 	sbuf_bcat(sb, "FreeBSD", note.n_namesz);
455 	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
456 	if (descsz == 0)
457 		return;
458 	sbuf_start_section(sb, &old_len);
459 	sbuf_bcat(sb, desc, descsz);
460 	sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0);
461 	free(desc);
462 }
463 
/*
 * Generate the ELF coredump header.
 *
 * "hdr" is the scratch region reserved at the front of the sbuf, large
 * enough for the Ehdr, numsegs + 1 Phdrs and, when the phdr count
 * overflows PN_XNUM, one Shdr.  "efd" is the target's executable,
 * re-read here so the core inherits its e_machine/e_flags.  "segoff"
 * is the file offset where segment data begins; "notesz" the size of
 * the note region.
 *
 * NOTE(review): this read() assumes efd is positioned at the ELF
 * header even though elf_ident() already consumed it — presumably the
 * caller rewinds/reopens the fd; verify against main().
 */
static void
elf_puthdr(int efd, pid_t pid, struct map_entry *map, void *hdr, size_t hdrsize,
    size_t notesz, size_t segoff, int numsegs)
{
	Elf_Ehdr *ehdr, binhdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;
	ssize_t cnt;

	cnt = read(efd, &binhdr, sizeof(binhdr));
	if (cnt < 0)
		err(1, "Failed to re-read ELF header");
	else if (cnt != sizeof(binhdr))
		errx(1, "Failed to re-read ELF header");

	ehdr = (Elf_Ehdr *)hdr;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	/* Mirror the machine type and flags of the binary being dumped. */
	ehdr->e_machine = binhdr.e_machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = binhdr.e_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		/* Common case: the count fits in the 16-bit e_phnum. */
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		/* Overflow: real count lives in section header 0 below. */
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;

		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = sizeof(Elf32_Size);
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = segoff;
	each_dumpable_segment(map, cb_put_phdr, &phc);
}
554 
555 /*
556  * Free the memory map.
557  */
558 static void
559 freemap(struct map_entry *map)
560 {
561 	struct map_entry *next;
562 
563 	while (map != NULL) {
564 		next = map->next;
565 		free(map);
566 		map = next;
567 	}
568 }
569 
570 /*
571  * Read the process's memory map using kinfo_getvmmap(), and return a list of
572  * VM map entries.  Only the non-device read/writable segments are
573  * returned.  The map entries in the list aren't fully filled in; only
574  * the items we need are present.
575  */
576 static struct map_entry *
577 readmap(pid_t pid)
578 {
579 	struct map_entry *ent, **linkp, *map;
580 	struct kinfo_vmentry *vmentl, *kve;
581 	int i, nitems;
582 
583 	vmentl = kinfo_getvmmap(pid, &nitems);
584 	if (vmentl == NULL)
585 		err(1, "cannot retrieve mappings for %u process", pid);
586 
587 	map = NULL;
588 	linkp = &map;
589 	for (i = 0; i < nitems; i++) {
590 		kve = &vmentl[i];
591 
592 		/*
593 		 * Ignore 'malformed' segments or ones representing memory
594 		 * mapping with MAP_NOCORE on.
595 		 * If the 'full' support is disabled, just dump the most
596 		 * meaningful data segments.
597 		 */
598 		if ((kve->kve_protection & KVME_PROT_READ) == 0 ||
599 		    (kve->kve_flags & KVME_FLAG_NOCOREDUMP) != 0 ||
600 		    kve->kve_type == KVME_TYPE_DEAD ||
601 		    kve->kve_type == KVME_TYPE_UNKNOWN ||
602 		    ((pflags & PFLAGS_FULL) == 0 &&
603 		    kve->kve_type != KVME_TYPE_DEFAULT &&
604 		    kve->kve_type != KVME_TYPE_VNODE &&
605 		    kve->kve_type != KVME_TYPE_SWAP &&
606 		    kve->kve_type != KVME_TYPE_PHYS))
607 			continue;
608 
609 		ent = calloc(1, sizeof(*ent));
610 		if (ent == NULL)
611 			errx(1, "out of memory");
612 		ent->start = (vm_offset_t)kve->kve_start;
613 		ent->end = (vm_offset_t)kve->kve_end;
614 		ent->protection = VM_PROT_READ;
615 		if ((kve->kve_protection & KVME_PROT_WRITE) != 0)
616 			ent->protection |= VM_PROT_WRITE;
617 		if ((kve->kve_protection & KVME_PROT_EXEC) != 0)
618 			ent->protection |= VM_PROT_EXECUTE;
619 
620 		*linkp = ent;
621 		linkp = &ent->next;
622 	}
623 	free(vmentl);
624 	return (map);
625 }
626 
627 /*
628  * Miscellaneous note out functions.
629  */
630 
/*
 * Build the NT_PRPSINFO descriptor: the command name and flattened
 * argument string, obtained from the kern.proc sysctl tree.  Returns
 * an allocated elfcore_prpsinfo_t; the caller (elf_putnote) frees it.
 */
static void *
elf_note_prpsinfo(void *arg, size_t *sizep)
{
	char *cp, *end;
	pid_t pid;
	elfcore_prpsinfo_t *psinfo;
	struct kinfo_proc kip;
	size_t len;
	int name[4];

	pid = *(pid_t *)arg;
	psinfo = calloc(1, sizeof(*psinfo));
	if (psinfo == NULL)
		errx(1, "out of memory");
	psinfo->pr_version = PRPSINFO_VERSION;
	psinfo->pr_psinfosz = sizeof(*psinfo);

	/* Fetch the kinfo_proc record for the command name. */
	name[0] = CTL_KERN;
	name[1] = KERN_PROC;
	name[2] = KERN_PROC_PID;
	name[3] = pid;
	len = sizeof(kip);
	if (sysctl(name, 4, &kip, &len, NULL, 0) == -1)
		err(1, "kern.proc.pid.%u", pid);
	if (kip.ki_pid != pid)
		err(1, "kern.proc.pid.%u", pid);
	strlcpy(psinfo->pr_fname, kip.ki_comm, sizeof(psinfo->pr_fname));
	/*
	 * Fetch the argument vector (NUL-separated strings) and flatten
	 * it into one space-separated command line.  The scan stops at
	 * end (one before the last byte fetched), so the final
	 * terminator is preserved; calloc above guarantees the buffer
	 * is NUL-terminated even at maximum length.
	 */
	name[2] = KERN_PROC_ARGS;
	len = sizeof(psinfo->pr_psargs) - 1;
	if (sysctl(name, 4, psinfo->pr_psargs, &len, NULL, 0) == 0 && len > 0) {
		cp = psinfo->pr_psargs;
		end = cp + len - 1;
		for (;;) {
			cp = memchr(cp, '\0', end - cp);
			if (cp == NULL)
				break;
			*cp = ' ';
		}
	} else
		/* Arguments unavailable; fall back to the command name. */
		strlcpy(psinfo->pr_psargs, kip.ki_comm,
		    sizeof(psinfo->pr_psargs));
	psinfo->pr_pid = pid;

	*sizep = sizeof(*psinfo);
	return (psinfo);
}
677 
678 #if defined(__i386__) || defined(__amd64__)
679 static void *
680 elf_note_x86_xstate(void *arg, size_t *sizep)
681 {
682 	lwpid_t tid;
683 	char *xstate;
684 	static bool xsave_checked = false;
685 	static struct ptrace_xstate_info info;
686 
687 	tid = *(lwpid_t *)arg;
688 	if (!xsave_checked) {
689 		if (ptrace(PT_GETXSTATE_INFO, tid, (void *)&info,
690 		    sizeof(info)) != 0)
691 			info.xsave_len = 0;
692 		xsave_checked = true;
693 	}
694 	if (info.xsave_len == 0) {
695 		*sizep = 0;
696 		return (NULL);
697 	}
698 	xstate = calloc(1, info.xsave_len);
699 	ptrace(PT_GETXSTATE, tid, xstate, 0);
700 	*(uint64_t *)(xstate + X86_XSTATE_XCR0_OFFSET) = info.xsave_mask;
701 	*sizep = info.xsave_len;
702 	return (xstate);
703 }
704 #endif
705 
706 #if defined(__powerpc__)
707 static void *
708 elf_note_powerpc_vmx(void *arg, size_t *sizep)
709 {
710 	lwpid_t tid;
711 	struct vmxreg *vmx;
712 	static bool has_vmx = true;
713 	struct vmxreg info;
714 
715 	tid = *(lwpid_t *)arg;
716 	if (has_vmx) {
717 		if (ptrace(PT_GETVRREGS, tid, (void *)&info,
718 		    sizeof(info)) != 0)
719 			has_vmx = false;
720 	}
721 	if (!has_vmx) {
722 		*sizep = 0;
723 		return (NULL);
724 	}
725 	vmx = calloc(1, sizeof(*vmx));
726 	memcpy(vmx, &info, sizeof(*vmx));
727 	*sizep = sizeof(*vmx);
728 	return (vmx);
729 }
730 
731 static void *
732 elf_note_powerpc_vsx(void *arg, size_t *sizep)
733 {
734 	lwpid_t tid;
735 	char *vshr_data;
736 	static bool has_vsx = true;
737 	uint64_t vshr[32];
738 
739 	tid = *(lwpid_t *)arg;
740 	if (has_vsx) {
741 		if (ptrace(PT_GETVSRREGS, tid, (void *)vshr,
742 		    sizeof(vshr)) != 0)
743 			has_vsx = false;
744 	}
745 	if (!has_vsx) {
746 		*sizep = 0;
747 		return (NULL);
748 	}
749 	vshr_data = calloc(1, sizeof(vshr));
750 	memcpy(vshr_data, vshr, sizeof(vshr));
751 	*sizep = sizeof(vshr);
752 	return (vshr_data);
753 }
754 #endif
755 
/*
 * Fetch a kern.proc.<what>.<pid> sysctl blob and package it in the
 * procstat note layout: a leading int holding the per-record structure
 * size, followed by the raw sysctl output.  Returns an allocated
 * buffer (caller frees); *sizep receives the descriptor length.
 */
static void *
procstat_sysctl(void *arg, int what, size_t structsz, size_t *sizep)
{
	size_t len;
	pid_t pid;
	int name[4], structsize;
	void *buf, *p;

	pid = *(pid_t *)arg;
	structsize = structsz;
	name[0] = CTL_KERN;
	name[1] = KERN_PROC;
	name[2] = what;
	name[3] = pid;
	/* First call only sizes the data. */
	len = 0;
	if (sysctl(name, 4, NULL, &len, NULL, 0) == -1)
		err(1, "kern.proc.%d.%u", what, pid);
	/*
	 * Allocate 1/3 slop for growth between the two calls.
	 * NOTE(review): len itself is not enlarged before the second
	 * sysctl, so the slop space appears unusable — verify intent.
	 */
	buf = calloc(1, sizeof(structsize) + len * 4 / 3);
	if (buf == NULL)
		errx(1, "out of memory");
	bcopy(&structsize, buf, sizeof(structsize));
	p = (char *)buf + sizeof(structsize);
	if (sysctl(name, 4, p, &len, NULL, 0) == -1)
		err(1, "kern.proc.%d.%u", what, pid);

	*sizep = sizeof(structsize) + len;
	return (buf);
}
784 
/* NT_PROCSTAT_PROC: kern.proc.pid output, one record per thread. */
static void *
elf_note_procstat_proc(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_PID | KERN_PROC_INC_THREAD,
	    sizeof(struct kinfo_proc), sizep));
}

/* NT_PROCSTAT_FILES: kern.proc.filedesc output (open descriptors). */
static void *
elf_note_procstat_files(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_FILEDESC,
	    sizeof(struct kinfo_file), sizep));
}

/* NT_PROCSTAT_VMMAP: kern.proc.vmmap output (VM map entries). */
static void *
elf_note_procstat_vmmap(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_VMMAP,
	    sizeof(struct kinfo_vmentry), sizep));
}

/* NT_PROCSTAT_GROUPS: kern.proc.groups output (gid list). */
static void *
elf_note_procstat_groups(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_GROUPS, sizeof(gid_t), sizep));
}

/* NT_PROCSTAT_UMASK: kern.proc.umask output. */
static void *
elf_note_procstat_umask(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_UMASK, sizeof(u_short), sizep));
}

/* NT_PROCSTAT_OSREL: kern.proc.osrel output. */
static void *
elf_note_procstat_osrel(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_OSREL, sizeof(int), sizep));
}

/* NT_PROCSTAT_PSSTRINGS: kern.proc.ps_strings output. */
static void *
elf_note_procstat_psstrings(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_PS_STRINGS,
	    sizeof(vm_offset_t), sizep));
}

/* NT_PROCSTAT_AUXV: kern.proc.auxv output (ELF auxiliary vector). */
static void *
elf_note_procstat_auxv(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_AUXV,
	    sizeof(Elf_Auxinfo), sizep));
}
845 
846 static void *
847 elf_note_procstat_rlimit(void *arg, size_t *sizep)
848 {
849 	pid_t pid;
850 	size_t len;
851 	int i, name[5], structsize;
852 	void *buf, *p;
853 
854 	pid = *(pid_t *)arg;
855 	structsize = sizeof(struct rlimit) * RLIM_NLIMITS;
856 	buf = calloc(1, sizeof(structsize) + structsize);
857 	if (buf == NULL)
858 		errx(1, "out of memory");
859 	bcopy(&structsize, buf, sizeof(structsize));
860 	p = (char *)buf + sizeof(structsize);
861 	name[0] = CTL_KERN;
862 	name[1] = KERN_PROC;
863 	name[2] = KERN_PROC_RLIMIT;
864 	name[3] = pid;
865 	len = sizeof(struct rlimit);
866 	for (i = 0; i < RLIM_NLIMITS; i++) {
867 		name[4] = i;
868 		if (sysctl(name, 5, p, &len, NULL, 0) == -1)
869 			err(1, "kern.proc.rlimit.%u", pid);
870 		if (len != sizeof(struct rlimit))
871 			errx(1, "kern.proc.rlimit.%u: short read", pid);
872 		p += len;
873 	}
874 
875 	*sizep = sizeof(structsize) + structsize;
876 	return (buf);
877 }
878 
/* Register this ELF dumper in the dumpset linker set consulted by gcore. */
struct dumpers __elfN(dump) = { elf_ident, elf_coredump };
TEXT_SET(dumpset, __elfN(dump));
881