/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Dell EMC
 * Copyright (c) 2007 Sandvine Incorporated
 * Copyright (c) 1998 John D. Polstra
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/endian.h>
#include <sys/param.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/queue.h>
#include <sys/linker_set.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <machine/elf.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libutil.h>

#include "extern.h"

/*
 * Code for generating ELF core dumps.
 */

struct map_entry {
	struct map_entry *next;
	vm_offset_t start;
	vm_offset_t end;
	vm_prot_t protection;
};

typedef void (*segment_callback)(struct map_entry *, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

#ifdef ELFCORE_COMPAT_32
typedef struct prpsinfo32 elfcore_prpsinfo_t;
#else
typedef prpsinfo_t elfcore_prpsinfo_t;
#endif

typedef void* (*notefunc_t)(void *, size_t *);

static void cb_put_phdr(struct map_entry *, void *);
static void cb_size_segment(struct map_entry *, void *);
static void each_dumpable_segment(struct map_entry *, segment_callback,
    void *closure);
static void elf_detach(void);	/* atexit() handler. */
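
/*
 * The elf_note_*() functions are notefunc_t callbacks: each allocates a
 * descriptor for its note, returns the descriptor's size through the
 * second argument, and leaves freeing the buffer to elf_putnote().  A
 * size of zero means the note has no descriptor.
 */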
static void *elf_note_prpsinfo(void *, size_t *);
#if defined(__i386__) || defined(__amd64__)
static void *elf_note_x86_xstate(void *, size_t *);
#endif
#if defined(__powerpc__)
static void *elf_note_powerpc_vmx(void *, size_t *);
static void *elf_note_powerpc_vsx(void *, size_t *);
#endif
static void *elf_note_procstat_auxv(void *, size_t *);
static void *elf_note_procstat_files(void *, size_t *);
static void *elf_note_procstat_groups(void *, size_t *);
static void *elf_note_procstat_kqueues(void *, size_t *);
static void *elf_note_procstat_osrel(void *, size_t *);
static void *elf_note_procstat_proc(void *, size_t *);
static void *elf_note_procstat_psstrings(void *, size_t *);
static void *elf_note_procstat_rlimit(void *, size_t *);
static void *elf_note_procstat_umask(void *, size_t *);
static void *elf_note_procstat_vmmap(void *, size_t *);
static void elf_puthdr(int, pid_t, struct map_entry *, void *, size_t, size_t,
    size_t, int);
static void elf_putnote(int, notefunc_t, void *, struct sbuf *);
static void elf_putnotes(pid_t, struct sbuf *, size_t *);
static void elf_putregnote(int, lwpid_t, struct sbuf *);
static void freemap(struct map_entry *);
static struct map_entry *readmap(pid_t);
static void *procstat_sysctl(void *, int, size_t, size_t *sizep);

static pid_t g_pid;		/* Pid being dumped, global for elf_detach */
static int g_status;		/* proc status after ptrace attach */

static int
elf_ident(int efd, pid_t pid __unused, char *binfile __unused)
{
	Elf_Ehdr hdr;
	int cnt;
	uint16_t machine;

	cnt = read(efd, &hdr, sizeof(hdr));
	if (cnt != sizeof(hdr))
		return (0);
	if (!IS_ELF(hdr))
		return (0);
	switch (hdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		machine = le16toh(hdr.e_machine);
		break;
	case ELFDATA2MSB:
		machine = be16toh(hdr.e_machine);
		break;
	default:
		return (0);
	}
	if (!ELF_MACHINE_OK(machine))
		return (0);

	/* Looks good. */
	return (1);
}

static void
elf_detach(void)
{
	int sig;

	if (g_pid != 0) {
		/*
		 * Forward any pending signals.  SIGSTOP is generated by
		 * ptrace itself, so ignore it.
		 */
		sig = WIFSTOPPED(g_status) ? WSTOPSIG(g_status) : 0;
		if (sig == SIGSTOP)
			sig = 0;
		ptrace(PT_DETACH, g_pid, (caddr_t)1, sig);
	}
}

/*
 * Write an ELF coredump for the given pid to the given fd.
 */
static void
elf_coredump(int efd, int fd, pid_t pid)
{
	struct map_entry *map;
	struct sseg_closure seginfo;
	struct sbuf *sb;
	void *hdr;
	size_t hdrsize, notesz, segoff;
	ssize_t n, old_len;
	Elf_Phdr *php;
	int i;

	/* Attach to process to dump. */
	g_pid = pid;
	if (atexit(elf_detach) != 0)
		err(1, "atexit");
	errno = 0;
	ptrace(PT_ATTACH, pid, NULL, 0);
	if (errno)
		err(1, "PT_ATTACH");
	if (waitpid(pid, &g_status, 0) == -1)
		err(1, "waitpid");

	/* Get the program's memory map. */
	map = readmap(pid);

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_dumpable_segment(map, cb_size_segment, &seginfo);

	/*
	 * Build the header and the notes using sbuf and write to the file.
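	 *
	 * The resulting file starts with the ELF header and program headers
	 * (plus a single section header if the PN_XNUM escape below is
	 * needed), followed by the notes, and is padded to a page boundary
	 * so that the memory segments written afterwards are page aligned.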
209 */ 210 sb = sbuf_new_auto(); 211 hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); 212 if (seginfo.count + 1 >= PN_XNUM) 213 hdrsize += sizeof(Elf_Shdr); 214 /* Start header + notes section. */ 215 sbuf_start_section(sb, NULL); 216 /* Make empty header subsection. */ 217 sbuf_start_section(sb, &old_len); 218 sbuf_putc(sb, 0); 219 sbuf_end_section(sb, old_len, hdrsize, 0); 220 /* Put notes. */ 221 elf_putnotes(pid, sb, ¬esz); 222 /* Align up to a page boundary for the program segments. */ 223 sbuf_end_section(sb, -1, getpagesize(), 0); 224 if (sbuf_finish(sb) != 0) 225 err(1, "sbuf_finish"); 226 hdr = sbuf_data(sb); 227 segoff = sbuf_len(sb); 228 /* Fill in the header. */ 229 elf_puthdr(efd, pid, map, hdr, hdrsize, notesz, segoff, seginfo.count); 230 231 n = write(fd, hdr, segoff); 232 if (n == -1) 233 err(1, "write"); 234 if (n < segoff) 235 errx(1, "short write"); 236 237 /* Write the contents of all of the writable segments. */ 238 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; 239 for (i = 0; i < seginfo.count; i++) { 240 struct ptrace_io_desc iorequest; 241 uintmax_t nleft = php->p_filesz; 242 243 iorequest.piod_op = PIOD_READ_D; 244 iorequest.piod_offs = (caddr_t)(uintptr_t)php->p_vaddr; 245 while (nleft > 0) { 246 char buf[8*1024]; 247 size_t nwant; 248 ssize_t ngot; 249 250 if (nleft > sizeof(buf)) 251 nwant = sizeof buf; 252 else 253 nwant = nleft; 254 iorequest.piod_addr = buf; 255 iorequest.piod_len = nwant; 256 ptrace(PT_IO, pid, (caddr_t)&iorequest, 0); 257 ngot = iorequest.piod_len; 258 if ((size_t)ngot < nwant) 259 errx(1, "short read wanted %zu, got %zd", 260 nwant, ngot); 261 ngot = write(fd, buf, nwant); 262 if (ngot == -1) 263 err(1, "write of segment %d failed", i); 264 if ((size_t)ngot != nwant) 265 errx(1, "short write"); 266 nleft -= nwant; 267 iorequest.piod_offs += ngot; 268 } 269 php++; 270 } 271 sbuf_delete(sb); 272 freemap(map); 273 } 274 275 /* 276 * A callback for each_dumpable_segment() to write out the segment's 277 * program header entry. 278 */ 279 static void 280 cb_put_phdr(struct map_entry *entry, void *closure) 281 { 282 struct phdr_closure *phc = (struct phdr_closure *)closure; 283 Elf_Phdr *phdr = phc->phdr; 284 size_t page_size; 285 286 page_size = getpagesize(); 287 phc->offset = roundup2(phc->offset, page_size); 288 289 phdr->p_type = PT_LOAD; 290 phdr->p_offset = phc->offset; 291 phdr->p_vaddr = entry->start; 292 phdr->p_paddr = 0; 293 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; 294 phdr->p_align = page_size; 295 phdr->p_flags = 0; 296 if (entry->protection & VM_PROT_READ) 297 phdr->p_flags |= PF_R; 298 if (entry->protection & VM_PROT_WRITE) 299 phdr->p_flags |= PF_W; 300 if (entry->protection & VM_PROT_EXECUTE) 301 phdr->p_flags |= PF_X; 302 303 phc->offset += phdr->p_filesz; 304 phc->phdr++; 305 } 306 307 /* 308 * A callback for each_dumpable_segment() to gather information about 309 * the number of segments and their total size. 310 */ 311 static void 312 cb_size_segment(struct map_entry *entry, void *closure) 313 { 314 struct sseg_closure *ssc = (struct sseg_closure *)closure; 315 316 ssc->count++; 317 ssc->size += entry->end - entry->start; 318 } 319 320 /* 321 * For each segment in the given memory map, call the given function 322 * with a pointer to the map entry and some arbitrary caller-supplied 323 * data. 
324 */ 325 static void 326 each_dumpable_segment(struct map_entry *map, segment_callback func, 327 void *closure) 328 { 329 struct map_entry *entry; 330 331 for (entry = map; entry != NULL; entry = entry->next) 332 (*func)(entry, closure); 333 } 334 335 static void 336 elf_putnotes(pid_t pid, struct sbuf *sb, size_t *sizep) 337 { 338 lwpid_t *tids; 339 size_t threads, old_len; 340 ssize_t size; 341 int i; 342 343 errno = 0; 344 threads = ptrace(PT_GETNUMLWPS, pid, NULL, 0); 345 if (errno) 346 err(1, "PT_GETNUMLWPS"); 347 tids = malloc(threads * sizeof(*tids)); 348 if (tids == NULL) 349 errx(1, "out of memory"); 350 errno = 0; 351 ptrace(PT_GETLWPLIST, pid, (void *)tids, threads); 352 if (errno) 353 err(1, "PT_GETLWPLIST"); 354 355 sbuf_start_section(sb, &old_len); 356 elf_putnote(NT_PRPSINFO, elf_note_prpsinfo, &pid, sb); 357 358 for (i = 0; i < threads; ++i) { 359 elf_putregnote(NT_PRSTATUS, tids[i], sb); 360 elf_putregnote(NT_FPREGSET, tids[i], sb); 361 elf_putregnote(NT_THRMISC, tids[i], sb); 362 elf_putregnote(NT_PTLWPINFO, tids[i], sb); 363 #if defined(__aarch64__) || defined(__arm__) 364 elf_putregnote(NT_ARM_TLS, tids[i], sb); 365 #endif 366 #if (defined(ELFCORE_COMPAT_32) && defined(__aarch64__)) || defined(__arm__) 367 elf_putregnote(NT_ARM_VFP, tids[i], sb); 368 #endif 369 #if defined(__i386__) || defined(__amd64__) 370 elf_putregnote(NT_X86_SEGBASES, tids[i], sb); 371 elf_putnote(NT_X86_XSTATE, elf_note_x86_xstate, tids + i, sb); 372 #endif 373 #if defined(__powerpc__) 374 elf_putnote(NT_PPC_VMX, elf_note_powerpc_vmx, tids + i, sb); 375 #ifndef __SPE__ 376 elf_putnote(NT_PPC_VSX, elf_note_powerpc_vsx, tids + i, sb); 377 #endif 378 #endif 379 } 380 381 #ifndef ELFCORE_COMPAT_32 382 elf_putnote(NT_PROCSTAT_PROC, elf_note_procstat_proc, &pid, sb); 383 elf_putnote(NT_PROCSTAT_FILES, elf_note_procstat_files, &pid, sb); 384 elf_putnote(NT_PROCSTAT_VMMAP, elf_note_procstat_vmmap, &pid, sb); 385 elf_putnote(NT_PROCSTAT_GROUPS, elf_note_procstat_groups, &pid, sb); 386 elf_putnote(NT_PROCSTAT_UMASK, elf_note_procstat_umask, &pid, sb); 387 elf_putnote(NT_PROCSTAT_RLIMIT, elf_note_procstat_rlimit, &pid, sb); 388 elf_putnote(NT_PROCSTAT_OSREL, elf_note_procstat_osrel, &pid, sb); 389 elf_putnote(NT_PROCSTAT_PSSTRINGS, elf_note_procstat_psstrings, &pid, 390 sb); 391 elf_putnote(NT_PROCSTAT_AUXV, elf_note_procstat_auxv, &pid, sb); 392 elf_putnote(NT_PROCSTAT_KQUEUES, elf_note_procstat_kqueues, &pid, sb); 393 #endif 394 395 size = sbuf_end_section(sb, old_len, 1, 0); 396 if (size == -1) 397 err(1, "sbuf_end_section"); 398 free(tids); 399 *sizep = size; 400 } 401 402 /* 403 * Emit one register set note section to sbuf. 
404 */ 405 static void 406 elf_putregnote(int type, lwpid_t tid, struct sbuf *sb) 407 { 408 Elf_Note note; 409 struct iovec iov; 410 ssize_t old_len; 411 412 iov.iov_base = NULL; 413 iov.iov_len = 0; 414 if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0) 415 return; 416 iov.iov_base = calloc(1, iov.iov_len); 417 if (iov.iov_base == NULL) 418 errx(1, "out of memory"); 419 if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0) 420 errx(1, "failed to fetch register set %d", type); 421 422 note.n_namesz = 8; /* strlen("FreeBSD") + 1 */ 423 note.n_descsz = iov.iov_len; 424 note.n_type = type; 425 426 sbuf_bcat(sb, ¬e, sizeof(note)); 427 sbuf_start_section(sb, &old_len); 428 sbuf_bcat(sb, "FreeBSD", note.n_namesz); 429 sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); 430 sbuf_start_section(sb, &old_len); 431 sbuf_bcat(sb, iov.iov_base, iov.iov_len); 432 sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); 433 free(iov.iov_base); 434 } 435 436 /* 437 * Emit one note section to sbuf. 438 */ 439 static void 440 elf_putnote(int type, notefunc_t notefunc, void *arg, struct sbuf *sb) 441 { 442 Elf_Note note; 443 size_t descsz; 444 ssize_t old_len; 445 void *desc; 446 447 desc = notefunc(arg, &descsz); 448 note.n_namesz = 8; /* strlen("FreeBSD") + 1 */ 449 note.n_descsz = descsz; 450 note.n_type = type; 451 452 sbuf_bcat(sb, ¬e, sizeof(note)); 453 sbuf_start_section(sb, &old_len); 454 sbuf_bcat(sb, "FreeBSD", note.n_namesz); 455 sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); 456 if (descsz == 0) 457 return; 458 sbuf_start_section(sb, &old_len); 459 sbuf_bcat(sb, desc, descsz); 460 sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); 461 free(desc); 462 } 463 464 /* 465 * Generate the ELF coredump header. 466 */ 467 static void 468 elf_puthdr(int efd, pid_t pid, struct map_entry *map, void *hdr, size_t hdrsize, 469 size_t notesz, size_t segoff, int numsegs) 470 { 471 Elf_Ehdr *ehdr, binhdr; 472 Elf_Phdr *phdr; 473 Elf_Shdr *shdr; 474 struct phdr_closure phc; 475 ssize_t cnt; 476 477 cnt = read(efd, &binhdr, sizeof(binhdr)); 478 if (cnt < 0) 479 err(1, "Failed to re-read ELF header"); 480 else if (cnt != sizeof(binhdr)) 481 errx(1, "Failed to re-read ELF header"); 482 483 ehdr = (Elf_Ehdr *)hdr; 484 485 ehdr->e_ident[EI_MAG0] = ELFMAG0; 486 ehdr->e_ident[EI_MAG1] = ELFMAG1; 487 ehdr->e_ident[EI_MAG2] = ELFMAG2; 488 ehdr->e_ident[EI_MAG3] = ELFMAG3; 489 ehdr->e_ident[EI_CLASS] = ELF_CLASS; 490 ehdr->e_ident[EI_DATA] = ELF_DATA; 491 ehdr->e_ident[EI_VERSION] = EV_CURRENT; 492 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; 493 ehdr->e_ident[EI_ABIVERSION] = 0; 494 ehdr->e_ident[EI_PAD] = 0; 495 ehdr->e_type = ET_CORE; 496 ehdr->e_machine = binhdr.e_machine; 497 ehdr->e_version = EV_CURRENT; 498 ehdr->e_entry = 0; 499 ehdr->e_phoff = sizeof(Elf_Ehdr); 500 ehdr->e_flags = binhdr.e_flags; 501 ehdr->e_ehsize = sizeof(Elf_Ehdr); 502 ehdr->e_phentsize = sizeof(Elf_Phdr); 503 ehdr->e_shentsize = sizeof(Elf_Shdr); 504 ehdr->e_shstrndx = SHN_UNDEF; 505 if (numsegs + 1 < PN_XNUM) { 506 ehdr->e_phnum = numsegs + 1; 507 ehdr->e_shnum = 0; 508 } else { 509 ehdr->e_phnum = PN_XNUM; 510 ehdr->e_shnum = 1; 511 512 ehdr->e_shoff = ehdr->e_phoff + 513 (numsegs + 1) * ehdr->e_phentsize; 514 515 shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff); 516 memset(shdr, 0, sizeof(*shdr)); 517 /* 518 * A special first section is used to hold large segment and 519 * section counts. 
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = sizeof(Elf32_Size);
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = segoff;
	each_dumpable_segment(map, cb_put_phdr, &phc);
}

/*
 * Free the memory map.
 */
static void
freemap(struct map_entry *map)
{
	struct map_entry *next;

	while (map != NULL) {
		next = map->next;
		free(map);
		map = next;
	}
}

/*
 * Read the process's memory map using kinfo_getvmmap(), and return a list of
 * VM map entries.  Only the non-device read/writable segments are
 * returned.  The map entries in the list aren't fully filled in; only
 * the items we need are present.
 */
static struct map_entry *
readmap(pid_t pid)
{
	struct map_entry *ent, **linkp, *map;
	struct kinfo_vmentry *vmentl, *kve;
	int i, nitems;

	vmentl = kinfo_getvmmap(pid, &nitems);
	if (vmentl == NULL)
		err(1, "cannot retrieve mappings for process %u", pid);

	map = NULL;
	linkp = &map;
	for (i = 0; i < nitems; i++) {
		kve = &vmentl[i];

		/*
		 * Ignore 'malformed' segments or ones representing memory
		 * mappings with MAP_NOCORE set.
		 * If the 'full' support is disabled, just dump the most
		 * meaningful data segments.
		 */
		if ((kve->kve_protection & KVME_PROT_READ) == 0 ||
		    (kve->kve_flags & KVME_FLAG_NOCOREDUMP) != 0 ||
		    kve->kve_type == KVME_TYPE_DEAD ||
		    kve->kve_type == KVME_TYPE_UNKNOWN ||
		    ((pflags & PFLAGS_FULL) == 0 &&
		    kve->kve_type != KVME_TYPE_DEFAULT &&
		    kve->kve_type != KVME_TYPE_VNODE &&
		    kve->kve_type != KVME_TYPE_SWAP &&
		    kve->kve_type != KVME_TYPE_PHYS))
			continue;

		ent = calloc(1, sizeof(*ent));
		if (ent == NULL)
			errx(1, "out of memory");
		ent->start = (vm_offset_t)kve->kve_start;
		ent->end = (vm_offset_t)kve->kve_end;
		ent->protection = VM_PROT_READ;
		if ((kve->kve_protection & KVME_PROT_WRITE) != 0)
			ent->protection |= VM_PROT_WRITE;
		if ((kve->kve_protection & KVME_PROT_EXEC) != 0)
			ent->protection |= VM_PROT_EXECUTE;

		*linkp = ent;
		linkp = &ent->next;
	}
	free(vmentl);
	return (map);
}

/*
 * Miscellaneous note output functions.
629 */ 630 631 static void * 632 elf_note_prpsinfo(void *arg, size_t *sizep) 633 { 634 char *cp, *end; 635 pid_t pid; 636 elfcore_prpsinfo_t *psinfo; 637 struct kinfo_proc kip; 638 size_t len; 639 int name[4]; 640 641 pid = *(pid_t *)arg; 642 psinfo = calloc(1, sizeof(*psinfo)); 643 if (psinfo == NULL) 644 errx(1, "out of memory"); 645 psinfo->pr_version = PRPSINFO_VERSION; 646 psinfo->pr_psinfosz = sizeof(*psinfo); 647 648 name[0] = CTL_KERN; 649 name[1] = KERN_PROC; 650 name[2] = KERN_PROC_PID; 651 name[3] = pid; 652 len = sizeof(kip); 653 if (sysctl(name, 4, &kip, &len, NULL, 0) == -1) 654 err(1, "kern.proc.pid.%u", pid); 655 if (kip.ki_pid != pid) 656 err(1, "kern.proc.pid.%u", pid); 657 strlcpy(psinfo->pr_fname, kip.ki_comm, sizeof(psinfo->pr_fname)); 658 name[2] = KERN_PROC_ARGS; 659 len = sizeof(psinfo->pr_psargs) - 1; 660 if (sysctl(name, 4, psinfo->pr_psargs, &len, NULL, 0) == 0 && len > 0) { 661 cp = psinfo->pr_psargs; 662 end = cp + len - 1; 663 for (;;) { 664 cp = memchr(cp, '\0', end - cp); 665 if (cp == NULL) 666 break; 667 *cp = ' '; 668 } 669 } else 670 strlcpy(psinfo->pr_psargs, kip.ki_comm, 671 sizeof(psinfo->pr_psargs)); 672 psinfo->pr_pid = pid; 673 674 *sizep = sizeof(*psinfo); 675 return (psinfo); 676 } 677 678 #if defined(__i386__) || defined(__amd64__) 679 static void * 680 elf_note_x86_xstate(void *arg, size_t *sizep) 681 { 682 lwpid_t tid; 683 char *xstate; 684 static bool xsave_checked = false; 685 static struct ptrace_xstate_info info; 686 687 tid = *(lwpid_t *)arg; 688 if (!xsave_checked) { 689 if (ptrace(PT_GETXSTATE_INFO, tid, (void *)&info, 690 sizeof(info)) != 0) 691 info.xsave_len = 0; 692 xsave_checked = true; 693 } 694 if (info.xsave_len == 0) { 695 *sizep = 0; 696 return (NULL); 697 } 698 xstate = calloc(1, info.xsave_len); 699 ptrace(PT_GETXSTATE, tid, xstate, 0); 700 *(uint64_t *)(xstate + X86_XSTATE_XCR0_OFFSET) = info.xsave_mask; 701 *sizep = info.xsave_len; 702 return (xstate); 703 } 704 #endif 705 706 #if defined(__powerpc__) 707 static void * 708 elf_note_powerpc_vmx(void *arg, size_t *sizep) 709 { 710 lwpid_t tid; 711 struct vmxreg *vmx; 712 static bool has_vmx = true; 713 struct vmxreg info; 714 715 tid = *(lwpid_t *)arg; 716 if (has_vmx) { 717 if (ptrace(PT_GETVRREGS, tid, (void *)&info, 718 sizeof(info)) != 0) 719 has_vmx = false; 720 } 721 if (!has_vmx) { 722 *sizep = 0; 723 return (NULL); 724 } 725 vmx = calloc(1, sizeof(*vmx)); 726 memcpy(vmx, &info, sizeof(*vmx)); 727 *sizep = sizeof(*vmx); 728 return (vmx); 729 } 730 731 static void * 732 elf_note_powerpc_vsx(void *arg, size_t *sizep) 733 { 734 lwpid_t tid; 735 char *vshr_data; 736 static bool has_vsx = true; 737 uint64_t vshr[32]; 738 739 tid = *(lwpid_t *)arg; 740 if (has_vsx) { 741 if (ptrace(PT_GETVSRREGS, tid, (void *)vshr, 742 sizeof(vshr)) != 0) 743 has_vsx = false; 744 } 745 if (!has_vsx) { 746 *sizep = 0; 747 return (NULL); 748 } 749 vshr_data = calloc(1, sizeof(vshr)); 750 memcpy(vshr_data, vshr, sizeof(vshr)); 751 *sizep = sizeof(vshr); 752 return (vshr_data); 753 } 754 #endif 755 756 static void * 757 procstat_sysctl(void *arg, int what, size_t structsz, size_t *sizep) 758 { 759 size_t len; 760 pid_t pid; 761 int name[5], structsize; 762 void *buf, *p; 763 764 pid = *(pid_t *)arg; 765 structsize = structsz; 766 name[0] = CTL_KERN; 767 name[1] = KERN_PROC; 768 name[2] = what; 769 name[3] = pid; 770 len = 0; 771 if (sysctl(name, 4, NULL, &len, NULL, 0) == -1) 772 err(1, "kern.proc.%d.%u", what, pid); 773 buf = calloc(1, sizeof(structsize) + len * 4 / 3); 774 if (buf == NULL) 
static void *
procstat_sysctl(void *arg, int what, size_t structsz, size_t *sizep)
{
	size_t len;
	pid_t pid;
	int name[5], structsize;
	void *buf, *p;

	pid = *(pid_t *)arg;
	structsize = structsz;
	name[0] = CTL_KERN;
	name[1] = KERN_PROC;
	name[2] = what;
	name[3] = pid;
	len = 0;
	if (sysctl(name, 4, NULL, &len, NULL, 0) == -1)
		err(1, "kern.proc.%d.%u", what, pid);
	buf = calloc(1, sizeof(structsize) + len * 4 / 3);
	if (buf == NULL)
		errx(1, "out of memory");
	bcopy(&structsize, buf, sizeof(structsize));
	p = (char *)buf + sizeof(structsize);
	if (sysctl(name, 4, p, &len, NULL, 0) == -1)
		err(1, "kern.proc.%d.%u", what, pid);

	*sizep = sizeof(structsize) + len;
	return (buf);
}

static void *
elf_note_procstat_proc(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_PID | KERN_PROC_INC_THREAD,
	    sizeof(struct kinfo_proc), sizep));
}

static void *
elf_note_procstat_files(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_FILEDESC,
	    sizeof(struct kinfo_file), sizep));
}

static void *
elf_note_procstat_vmmap(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_VMMAP,
	    sizeof(struct kinfo_vmentry), sizep));
}

static void *
elf_note_procstat_groups(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_GROUPS, sizeof(gid_t), sizep));
}

static void *
elf_note_procstat_umask(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_UMASK, sizeof(u_short), sizep));
}

static void *
elf_note_procstat_osrel(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_OSREL, sizeof(int), sizep));
}

static void *
elf_note_procstat_psstrings(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_PS_STRINGS,
	    sizeof(vm_offset_t), sizep));
}

static void *
elf_note_procstat_auxv(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_AUXV,
	    sizeof(Elf_Auxinfo), sizep));
}

static void *
elf_note_procstat_kqueues(void *arg, size_t *sizep)
{

	return (procstat_sysctl(arg, KERN_PROC_KQUEUE,
	    sizeof(struct kinfo_knote), sizep));
}

static void *
elf_note_procstat_rlimit(void *arg, size_t *sizep)
{
	pid_t pid;
	size_t len;
	int i, name[5], structsize;
	void *buf, *p;

	pid = *(pid_t *)arg;
	structsize = sizeof(struct rlimit) * RLIM_NLIMITS;
	buf = calloc(1, sizeof(structsize) + structsize);
	if (buf == NULL)
		errx(1, "out of memory");
	bcopy(&structsize, buf, sizeof(structsize));
	p = (char *)buf + sizeof(structsize);
	name[0] = CTL_KERN;
	name[1] = KERN_PROC;
	name[2] = KERN_PROC_RLIMIT;
	name[3] = pid;
	len = sizeof(struct rlimit);
	for (i = 0; i < RLIM_NLIMITS; i++) {
		name[4] = i;
		if (sysctl(name, 5, p, &len, NULL, 0) == -1)
			err(1, "kern.proc.rlimit.%u", pid);
		if (len != sizeof(struct rlimit))
			errx(1, "kern.proc.rlimit.%u: short read", pid);
		p += len;
	}

	*sizep = sizeof(structsize) + structsize;
	return (buf);
}

struct dumpers __elfN(dump) = { elf_ident, elf_coredump };
TEXT_SET(dumpset, __elfN(dump));