/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017 Dell EMC
 * Copyright (c) 2000-2001, 2003 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/eventhandler.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define	ELF_NOTE_ROUNDSIZE	4
#define	OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel, uint32_t *fctl0);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry);
static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
    int32_t *osrel);
static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static boolean_t __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel, boolean_t *has_fctl0,
    uint32_t *fctl0);
static vm_prot_t __elfN(trans_prot)(Elf_Word);
static Elf_Word __elfN(untrans_prot)(vm_prot_t);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

#define	CORE_BUF_SIZE	(16 * 1024)

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0,
    "include all and only RW pages in core dumps");

int __elfN(nxstack) =
#if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
    (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
    defined(__riscv)
	1;
#else
	0;
#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");

#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
int i386_read_exec = 0;
SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
    "enable execution from readable segments");
#endif

static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR;
static int
sysctl_pie_base(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = __elfN(pie_base);
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if ((val & PAGE_MASK) != 0)
		return (EINVAL);
	__elfN(pie_base) = val;
	return (0);
}
SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_pie_base, "LU",
    "PIE load base without randomization");

SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");
#define	ASLR_NODE_OID	__CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)

static int __elfN(aslr_enabled) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
    &__elfN(aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization");

static int __elfN(pie_aslr_enabled) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
    &__elfN(pie_aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization for PIE binaries");

static int __elfN(aslr_honor_sbrk) = 1;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
    &__elfN(aslr_honor_sbrk), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");

static int __elfN(aslr_stack_gap) = 3;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack_gap, CTLFLAG_RW,
    &__elfN(aslr_stack_gap), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": maximum percentage of main stack to waste on a random gap");

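/*
 * Usage note: the __CONCAT()-generated OIDs above surface under
 * "kern.elf<wordsize>"; on a 64-bit kernel, for example, ASLR would be
 * enabled for all new processes with something like:
 *
 *	sysctl kern.elf64.aslr.enable=1
 *	sysctl kern.elf64.aslr.pie_enable=1
 */
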
static int __elfN(sigfastblock) = 1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
    CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
    "enable sigfastblock for new processes");

static bool __elfN(allow_wx) = true;
SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
    CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
    "Allow pages to be mapped simultaneously writable and executable");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

#define	aligned(a, t)	(rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))

static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";

Elf_Brandnote __elfN(freebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
	.hdr.n_descsz	= sizeof(int32_t),
	.hdr.n_type	= NT_FREEBSD_ABI_TAG,
	.vendor		= FREEBSD_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= __elfN(freebsd_trans_osrel)
};

static bool
__elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
{
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	*osrel = *(const int32_t *)(p);

	return (true);
}

static const char GNU_ABI_VENDOR[] = "GNU";
static int GNU_KFREEBSD_ABI_DESC = 3;

Elf_Brandnote __elfN(kfreebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
	.hdr.n_descsz	= 16,	/* XXX at least 16 */
	.hdr.n_type	= 1,
	.vendor		= GNU_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= kfreebsd_trans_osrel
};

static bool
kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
{
	const Elf32_Word *desc;
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);

	desc = (const Elf32_Word *)p;
	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
		return (false);

	/*
	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
	 */
	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];

	return (true);
}

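/*
 * Worked example (illustrative): a GNU note descriptor of
 * { 3, 12, 1, 0 } -- ABI tag 3 (kFreeBSD), version 12.1.0 -- yields
 * *osrel = 12 * 100000 + 1 * 1000 + 0 = 1201000, matching the native
 * __FreeBSD_version encoding.
 */
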
int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS) {
		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
		    __func__, entry);
		return (-1);
	}
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

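/*
 * Sketch (hypothetical names): an ABI module typically registers its
 * brand from a sysinit and uses __elfN(brand_inuse)() at unload time to
 * refuse unloading while processes still run on its sysvec:
 *
 *	static Elf64_Brandinfo example_brand = {
 *		.brand		= ELFOSABI_FREEBSD,
 *		.machine	= EM_X86_64,
 *		.interp_path	= "/libexec/ld-elf.so.1",
 *		.sysvec		= &elf64_freebsd_sysvec,
 *	};
 *	SYSINIT(example_brand, SI_SUB_EXEC, SI_ORDER_FIRST,
 *	    (sysinit_cfunc_t)elf64_insert_brand_entry, &example_brand);
 */
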
static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel, uint32_t *fctl0)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	Elf_Brandinfo *bi, *bi_m;
	boolean_t ret, has_fctl0;
	int i, interp_name_len;

	interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;

	/*
	 * We support four types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, (3) the path in the `interp_path'
	 * field, and (4) the ".note.ABI-tag" ELF section.
	 */

	/* Look for an ".note.ABI-tag" ELF section */
	bi_m = NULL;
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL)
			continue;
		if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
			continue;
		if (hdr->e_machine == bi->machine && (bi->flags &
		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
			has_fctl0 = false;
			*fctl0 = 0;
			*osrel = 0;
			ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
			    &has_fctl0, fctl0);
			/* Give the brand a chance to veto check_note's guess */
			if (ret && bi->header_supported) {
				ret = bi->header_supported(imgp, osrel,
				    has_fctl0 ? fctl0 : NULL);
			}
			/*
			 * If the note checker claimed the binary, but the
			 * interpreter path in the image does not
			 * match the default one for the brand, try to
			 * search for other brands with the same
			 * interpreter.  Either there is a better brand
			 * with the right interpreter, or, failing
			 * this, we return the first brand which accepted
			 * our note and, optionally, header.
			 */
			if (ret && bi_m == NULL && interp != NULL &&
			    (bi->interp_path == NULL ||
			    (strlen(bi->interp_path) + 1 != interp_name_len ||
			    strncmp(interp, bi->interp_path, interp_name_len)
			    != 0))) {
				bi_m = bi;
				ret = 0;
			}
			if (ret)
				return (bi);
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    (bi->compat_3_brand != NULL &&
		    strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand) == 0))) {
			/* Looks good, but give the brand a chance to veto */
			if (bi->header_supported == NULL ||
			    bi->header_supported(imgp, NULL, NULL)) {
				/*
				 * Again, prefer a strictly matching
				 * interpreter path.
				 */
				if (interp_name_len == 0 &&
				    bi->interp_path == NULL)
					return (bi);
				if (bi->interp_path != NULL &&
				    strlen(bi->interp_path) + 1 ==
				    interp_name_len && strncmp(interp,
				    bi->interp_path, interp_name_len) == 0)
					return (bi);
				if (bi_m == NULL)
					bi_m = bi;
			}
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* No known brand, see if the header is recognized by any brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
		    bi->header_supported == NULL)
			continue;
		if (hdr->e_machine == bi->machine) {
			ret = bi->header_supported(imgp, NULL, NULL);
			if (ret)
				return (bi);
		}
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi == NULL || (bi->flags &
			    (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
			    != 0)
				continue;
			if (hdr->e_machine == bi->machine &&
			    bi->interp_path != NULL &&
			    /* ELF image p_filesz includes terminating zero */
			    strlen(bi->interp_path) + 1 == interp_name_len &&
			    strncmp(interp, bi->interp_path, interp_name_len)
			    == 0 && (bi->header_supported == NULL ||
			    bi->header_supported(imgp, NULL, NULL)))
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand &&
		    (bi->header_supported == NULL ||
		    bi->header_supported(imgp, NULL, NULL)))
			return (bi);
	}
	return (NULL);
}

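/*
 * To summarize the precedence implemented above: (1) an ABI note whose
 * interpreter also matches, (2) an EI_OSABI or old FreeBSD 3.x string
 * brand, (3) any brand whose header_supported() hook claims the image,
 * (4) an interpreter path match, and finally (5) the brand selected by
 * the kern.elf<wordsize>.fallback_brand sysctl.
 */
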
static bool
__elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
{
	return (hdr->e_phoff <= PAGE_SIZE &&
	    (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */

	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	int error;
	vm_offset_t off;

	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
	    trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);

	/*
	 * Find the page from the underlying object.
	 */
	if (object != NULL) {
		sf = vm_imgact_map_page(object, offset);
		if (sf == NULL)
			return (KERN_FAILURE);
		off = offset - trunc_page(offset);
		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
		    end - start);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (KERN_FAILURE);
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object,
    vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, locked, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		end = trunc_page(end);
	}
	if (start >= end)
		return (KERN_SUCCESS);
	if ((offset & PAGE_MASK) != 0) {
		/*
		 * The mapping is not page aligned.  This means that we have
		 * to copy the data.
		 */
		rv = vm_map_fixed(map, NULL, 0, start, end - start,
		    prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
		if (rv != KERN_SUCCESS)
			return (rv);
		if (object == NULL)
			return (KERN_SUCCESS);
		for (; start < end; start += sz) {
			sf = vm_imgact_map_page(object, offset);
			if (sf == NULL)
				return (KERN_FAILURE);
			off = offset - trunc_page(offset);
			sz = end - start;
			if (sz > PAGE_SIZE - off)
				sz = PAGE_SIZE - off;
			error = copyout((caddr_t)sf_buf_kva(sf) + off,
			    (caddr_t)start, sz);
			vm_imgact_unmap_page(sf);
			if (error != 0)
				return (KERN_FAILURE);
			offset += sz;
		}
	} else {
		vm_object_reference(object);
		rv = vm_map_fixed(map, object, offset, start, end - start,
		    prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
		    (object != NULL ? MAP_VN_EXEC : 0));
		if (rv != KERN_SUCCESS) {
			locked = VOP_ISLOCKED(imgp->vp);
			VOP_UNLOCK(imgp->vp);
			vm_object_deallocate(object);
			vn_lock(imgp->vp, locked | LK_RETRY);
			return (rv);
		} else if (object != NULL) {
			MPASS(imgp->vp->v_object == object);
			VOP_SET_TEXT_CHECKED(imgp->vp);
		}
	}
	return (KERN_SUCCESS);
}

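/*
 * Illustrative note: the copy path above is taken when, after the head
 * and tail fragments have been peeled off, the segment's file offset is
 * still not page aligned, i.e. p_offset and p_vaddr are not congruent
 * modulo the page size (e.g. p_offset = 0x1800 with a page-aligned
 * p_vaddr on a 4K-page machine).  The file pages then cannot be shared
 * with the vnode's object and are copied by hand instead of being
 * mapped copy-on-write.
 */
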
static int
__elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_map_t map;
	vm_object_t object;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_ooffset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	object = imgp->object;
	map = &imgp->proc->p_vmspace->vm_map;
	map_addr = trunc_page((vm_offset_t)vmaddr);
	file_addr = trunc_page(offset);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (filsz == 0)
		map_len = 0;
	else if (memsz > filsz)
		map_len = trunc_page(offset + filsz) - file_addr;
	else
		map_len = round_page(offset + filsz) - file_addr;

	if (map_len != 0) {
		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(imgp, map, object, file_addr,
		    map_addr, map_addr + map_len, prot, cow);
		if (rv != KERN_SUCCESS)
			return (EINVAL);

		/* we can stop now if we've covered it all */
		if (memsz == filsz)
			return (0);
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
	    filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
		    map_addr + map_len, prot, 0);
		if (rv != KERN_SUCCESS)
			return (EINVAL);
	}

	if (copy_len != 0) {
		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/* send the page fragment to user space */
		error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr,
		    copy_len);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (error);
	}

	/*
	 * Remove write access to the page if it was only granted by map_insert
	 * to allow copyout.
	 */
	if ((prot & VM_PROT_WRITE) == 0)
		vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
		    map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);

	return (0);
}

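/*
 * Worked example (illustrative numbers): a .data segment with
 * p_offset = 0, p_vaddr = 0x401000, p_filesz = 0x1234 and
 * p_memsz = 0x3000 maps the file range [0, 0x1000) directly at
 * 0x401000, copies the 0x234-byte file tail into the first page of an
 * anonymous mapping at 0x402000, and backs the rest of
 * [0x402000, 0x404000) with zero-fill pages for the bss.
 */
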
static int
__elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
{
	vm_prot_t prot;
	u_long base_addr;
	bool first;
	int error, i;

	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	base_addr = 0;
	first = true;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

		/* Loadable segment */
		prot = __elfN(trans_prot)(phdr[i].p_flags);
		error = __elfN(load_section)(imgp, phdr[i].p_offset,
		    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
		    phdr[i].p_memsz, phdr[i].p_filesz, prot);
		if (error != 0)
			return (error);

		/*
		 * Establish the base address if this is the first segment.
		 */
		if (first) {
			base_addr = trunc_page(phdr[i].p_vaddr + rbase);
			first = false;
		}
	}

	if (base_addrp != NULL)
		*base_addrp = base_addr;

	return (0);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vattr *attr;
	struct image_params *imgp;
	u_long rbase;
	u_long base_addr = 0;
	int error;

#ifdef CAPABILITY_MODE
	/*
	 * XXXJA: This check can go away once we are sufficiently confident
	 * that the checks in namei() are correct.
	 */
	if (IN_CAPABILITY_MODE(curthread))
		return (ECAPMODE);
#endif

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;

	NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF,
	    UIO_SYSSPACE, file, curthread);
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto fail;

	error = exec_map_first_page(imgp);
	if (error)
		goto fail;

	imgp->object = nd->ni_vp->v_object;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	if (!__elfN(phdr_in_zero_page)(hdr)) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		error = ENOEXEC;
		goto fail;
	}

	error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
	if (error != 0)
		goto fail;

	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);

	if (nd->ni_vp) {
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
		vput(nd->ni_vp);
	}
	free(tempdata, M_TEMP);

	return (error);
}

static u_long
__CONCAT(rnd_, __elfN(base))(vm_map_t map __unused, u_long minv, u_long maxv,
    u_int align)
{
	u_long rbase, res;

	MPASS(vm_map_min(map) <= minv);
	MPASS(maxv <= vm_map_max(map));
	MPASS(minv < maxv);
	MPASS(minv + align < maxv);
	arc4rand(&rbase, sizeof(rbase), 0);
	res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
	res &= ~((u_long)align - 1);
	if (res >= maxv)
		res -= align;
	KASSERT(res >= minv,
	    ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
	    res, minv, maxv, rbase));
	KASSERT(res < maxv,
	    ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
	    res, maxv, minv, rbase));
	return (res);
}

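/*
 * Illustrative trace: with minv = 0x10000, maxv = 0x50000 and
 * align = 0x1000, a random rbase of 0x1234567 gives
 * 0x10000 + (0x1234567 % 0x40000) = 0x10000 + 0x34567 = 0x44567, which
 * the alignment mask truncates to 0x44000 -- an aligned base spread
 * roughly uniformly over [minv, maxv).
 */
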
static int
__elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long et_dyn_addr)
{
	struct vmspace *vmspace;
	const char *err_str;
	u_long text_size, data_size, total_size, text_addr, data_addr;
	u_long seg_size, seg_addr;
	int i;

	err_str = NULL;
	text_size = data_size = total_size = text_addr = data_addr = 0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

		seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
		seg_size = round_page(phdr[i].p_memsz +
		    phdr[i].p_vaddr + et_dyn_addr - seg_addr);

		/*
		 * Make the largest executable segment the official
		 * text segment and all others data.
		 *
		 * Note that obreak() assumes that data_addr + data_size == end
		 * of data load area, and the ELF file format expects segments
		 * to be sorted by address.  If multiple data segments exist,
		 * the last one will be used.
		 */

		if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
			text_size = seg_size;
			text_addr = seg_addr;
		} else {
			data_size = seg_size;
			data_addr = seg_addr;
		}
		total_size += seg_size;
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments' pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
		err_str = "Data segment size exceeds process limit";
	else if (text_size > maxtsiz)
		err_str = "Text segment size exceeds system limit";
	else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
		err_str = "Total segment size exceeds process limit";
	else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
		err_str = "Data segment size exceeds resource limit";
	else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
		err_str = "Total segment size exceeds resource limit";
	PROC_UNLOCK(imgp->proc);
	if (err_str != NULL) {
		uprintf("%s\n", err_str);
		return (ENOMEM);
	}

	vmspace = imgp->proc->p_vmspace;
	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	return (0);
}

static int
__elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
    char **interpp, bool *free_interpp)
{
	struct thread *td;
	char *interp;
	int error, interp_name_len;

	KASSERT(phdr->p_type == PT_INTERP,
	    ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	td = curthread;

	/* Path to interpreter */
	if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	interp_name_len = phdr->p_filesz;
	if (phdr->p_offset > PAGE_SIZE ||
	    interp_name_len > PAGE_SIZE - phdr->p_offset) {
		/*
		 * The vnode lock might be needed by the pagedaemon to
		 * clean pages owned by the vnode.  Do not allow sleep
		 * waiting for memory with the vnode locked, instead
		 * try non-sleepable allocation first, and if it
		 * fails, go to the slow path where we drop the lock
		 * and do M_WAITOK.  A text reference prevents
		 * modifications to the vnode content.
		 */
		interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
		if (interp == NULL) {
			VOP_UNLOCK(imgp->vp);
			interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}

		error = vn_rdwr(UIO_READ, imgp->vp, interp,
		    interp_name_len, phdr->p_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
		    NOCRED, NULL, td);
		if (error != 0) {
			free(interp, M_TEMP);
			uprintf("i/o error PT_INTERP %d\n", error);
			return (error);
		}
		interp[interp_name_len] = '\0';

		*interpp = interp;
		*free_interpp = true;
		return (0);
	}

	interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
	if (interp[interp_name_len - 1] != '\0') {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	*interpp = interp;
	*free_interpp = false;
	return (0);
}

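/*
 * For a native dynamic executable the path recovered here is typically
 * "/libexec/ld-elf.so.1".  p_filesz counts the terminating NUL, which
 * is why a value below 2 (an empty string) is rejected above.
 */
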
static int
__elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
    const char *interp, u_long *addr, u_long *entry)
{
	char *path;
	int error;

	if (brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s",
		    brand_info->emul_path, interp);
		error = __elfN(load_file)(imgp->proc, path, addr, entry);
		free(path, M_TEMP);
		if (error == 0)
			return (0);
	}

	if (brand_info->interp_newpath != NULL &&
	    (brand_info->interp_path == NULL ||
	    strcmp(interp, brand_info->interp_path) == 0)) {
		error = __elfN(load_file)(imgp->proc,
		    brand_info->interp_newpath, addr, entry);
		if (error == 0)
			return (0);
	}

	error = __elfN(load_file)(imgp->proc, interp, addr, entry);
	if (error == 0)
		return (0);

	uprintf("ELF interpreter %s not found, error %d\n", interp, error);
	return (error);
}

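/*
 * Example of the fallback chain above (hypothetical values): a brand
 * with emul_path = "/compat/linux" first tries
 * "/compat/linux/lib64/ld-linux-x86-64.so.2", then any interp_newpath
 * override, and finally the literal PT_INTERP string itself.
 */
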
/*
 * Impossible et_dyn_addr initial value indicating that the real base
 * must be calculated later with some randomization applied.
 */
#define	ET_DYN_ADDR_RAND	1

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	struct thread *td;
	const Elf_Ehdr *hdr;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	vm_map_t map;
	char *interp;
	Elf_Brandinfo *brand_info;
	struct sysentvec *sv;
	u_long addr, baddr, et_dyn_addr, entry, proghdr;
	u_long maxalign, mapsz, maxv, maxv1;
	uint32_t fctl0;
	int32_t osrel;
	bool free_interp;
	int error, i, n;

	hdr = (const Elf_Ehdr *)imgp->image_header;

	/*
	 * Do we have a valid ELF header ?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
	 * if the particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if (!__elfN(phdr_in_zero_page)(hdr)) {
		uprintf("Program headers not in the first page\n");
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		uprintf("Unaligned program headers\n");
		return (ENOEXEC);
	}

	n = error = 0;
	baddr = 0;
	osrel = 0;
	fctl0 = 0;
	entry = proghdr = 0;
	interp = NULL;
	free_interp = false;
	td = curthread;
	maxalign = PAGE_SIZE;
	mapsz = 0;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:
			if (n == 0)
				baddr = phdr[i].p_vaddr;
			if (phdr[i].p_align > maxalign)
				maxalign = phdr[i].p_align;
			mapsz += phdr[i].p_memsz;
			n++;

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry.  Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
			    <= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
			break;
		case PT_INTERP:
			/* Path to interpreter */
			if (interp != NULL) {
				uprintf("Multiple PT_INTERP headers\n");
				error = ENOEXEC;
				goto ret;
			}
			error = __elfN(get_interp)(imgp, &phdr[i], &interp,
			    &free_interp);
			if (error != 0)
				goto ret;
			break;
		case PT_GNU_STACK:
			if (__elfN(nxstack))
				imgp->stack_prot =
				    __elfN(trans_prot)(phdr[i].p_flags);
			imgp->stack_sz = phdr[i].p_memsz;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto ret;
	}
	sv = brand_info->sysvec;
	et_dyn_addr = 0;
	if (hdr->e_type == ET_DYN) {
		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
			uprintf("Cannot execute shared object\n");
			error = ENOEXEC;
			goto ret;
		}
		/*
		 * Honour the base load address from the DSO if it is
		 * non-zero for some reason.
		 */
		if (baddr == 0) {
			if ((sv->sv_flags & SV_ASLR) == 0 ||
			    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
				et_dyn_addr = __elfN(pie_base);
			else if ((__elfN(pie_aslr_enabled) &&
			    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
			    (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
				et_dyn_addr = ET_DYN_ADDR_RAND;
			else
				et_dyn_addr = __elfN(pie_base);
		}
	}

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 *
	 * The VV_TEXT flag prevents modifications to the executable while
	 * the vnode is unlocked.
	 */
	VOP_UNLOCK(imgp->vp);

	/*
	 * Decide whether to enable randomization of user mappings.
	 * First, reset user preferences for the setid binaries.
	 * Then, account for the support of the randomization by the
	 * ABI, by user preferences, and give special treatment to
	 * PIE binaries.
	 */
	if (imgp->credential_setid) {
		PROC_LOCK(imgp->proc);
		imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		PROC_UNLOCK(imgp->proc);
	}
	if ((sv->sv_flags & SV_ASLR) == 0 ||
	    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
	    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
		KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
		    ("et_dyn_addr == RAND and !ASLR"));
	} else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
	    (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
	    et_dyn_addr == ET_DYN_ADDR_RAND) {
		imgp->map_flags |= MAP_ASLR;
		/*
		 * If the user does not care about sbrk, utilize the bss
		 * grow region for mappings as well.  We can select
		 * the base for the image anywhere and still not suffer
		 * from fragmentation.
		 */
		if (!__elfN(aslr_honor_sbrk) ||
		    (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
			imgp->map_flags |= MAP_ASLR_IGNSTART;
	}

	if (!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0)
		imgp->map_flags |= MAP_WXORX;

	error = exec_new_vmspace(imgp, sv);
	vmspace = imgp->proc->p_vmspace;
	map = &vmspace->vm_map;

	imgp->proc->p_sysent = sv;

	maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK);
	if (et_dyn_addr == ET_DYN_ADDR_RAND) {
		KASSERT((map->flags & MAP_ASLR) != 0,
		    ("ET_DYN_ADDR_RAND but !MAP_ASLR"));
		et_dyn_addr = __CONCAT(rnd_, __elfN(base))(map,
		    vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
		    /* reserve half of the address space for the interpreter */
		    maxv / 2, 1UL << flsl(maxalign));
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	if (error != 0)
		goto ret;

	error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL);
	if (error != 0)
		goto ret;

	error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr);
	if (error != 0)
		goto ret;

	entry = (u_long)hdr->e_entry + et_dyn_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td,
	    RLIMIT_DATA));
	if ((map->flags & MAP_ASLR) != 0) {
		maxv1 = maxv / 2 + addr / 2;
		MPASS(maxv1 >= addr);	/* No overflow */
		map->anon_loc = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1,
		    (MAXPAGESIZES > 1 && pagesizes[1] != 0) ?
		    pagesizes[1] : pagesizes[0]);
	} else {
		map->anon_loc = addr;
	}

	imgp->entry_addr = entry;

	if (interp != NULL) {
		VOP_UNLOCK(imgp->vp);
		if ((map->flags & MAP_ASLR) != 0) {
			/* Assume that the interpreter fits into 1/4 of AS */
			maxv1 = maxv / 2 + addr / 2;
			MPASS(maxv1 >= addr);	/* No overflow */
			addr = __CONCAT(rnd_, __elfN(base))(map, addr,
			    maxv1, PAGE_SIZE);
		}
		error = __elfN(load_interp)(imgp, brand_info, interp, &addr,
		    &imgp->entry_addr);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto ret;
	} else
		addr = et_dyn_addr;

	/*
	 * Construct auxargs table (used by the copyout_auxargs routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT);
	if (elf_auxargs == NULL) {
		VOP_UNLOCK(imgp->vp);
		elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr + et_dyn_addr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->hdr_eflags = hdr->e_flags;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;
	imgp->reloc_base = addr;
	imgp->proc->p_osrel = osrel;
	imgp->proc->p_fctl0 = fctl0;
	imgp->proc->p_elf_machine = hdr->e_machine;
	imgp->proc->p_elf_flags = hdr->e_flags;

ret:
	if (free_interp)
		free(interp, M_TEMP);
	return (error);
}

#define	suword __CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Auxinfo *argarray, *pos;
	int error;

	argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP,
	    M_WAITOK | M_ZERO);

	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_OSRELDATE,
	    imgp->proc->p_ucred->cr_prison->pr_osreldate);
	if (imgp->canary != 0) {
		AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary);
		AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
	}
	AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
	if (imgp->pagesizes != 0) {
		AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes);
		AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
	}
	if (imgp->sysent->sv_timekeep_base != 0) {
		AUXARGS_ENTRY(pos, AT_TIMEKEEP,
		    imgp->sysent->sv_timekeep_base);
	}
	AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
	    != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
	    imgp->sysent->sv_stackprot);
	if (imgp->sysent->sv_hwcap != NULL)
		AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap);
	if (imgp->sysent->sv_hwcap2 != NULL)
		AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2);
	AUXARGS_ENTRY(pos, AT_BSDFLAGS, __elfN(sigfastblock) ?
	    ELF_BSDF_SIGFASTBLK : 0);
	AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc);
	AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv);
	AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc);
	AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv);
	AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings);
	if (imgp->sysent->sv_fxrng_gen_base != 0)
		AUXARGS_ENTRY(pos, AT_FXRNG, imgp->sysent->sv_fxrng_gen_base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;
	KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs"));

	error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT);
	free(argarray, M_TEMP);
	return (error);
}

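/*
 * The resulting vector is visible from userland; e.g. "procstat -x <pid>"
 * lists entries such as AT_PHDR, AT_PAGESZ (PAGE_SIZE) and AT_ENTRY in
 * the order they were emitted above, terminated by AT_NULL.
 */
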
int
__elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp)
{
	Elf_Addr *base;

	base = (Elf_Addr *)*stack_base;
	base--;
	if (suword(base, imgp->args->argc) == -1)
		return (EFAULT);
	*stack_base = (uintptr_t)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

typedef void (*outfunc_t)(void *, struct sbuf *, size_t *);

struct note_info {
	int type;		/* Note type. */
	outfunc_t outfunc;	/* Output function. */
	void *outarg;		/* Argument for the output function. */
	size_t outsize;		/* Output size. */
	TAILQ_ENTRY(note_info) link;	/* Link to the next note info. */
};

TAILQ_HEAD(note_info_list, note_info);

/* Coredump output parameters. */
struct coredump_params {
	off_t		offset;
	struct ucred	*active_cred;
	struct ucred	*file_cred;
	struct thread	*td;
	struct vnode	*vp;
	struct compressor *comp;
};

extern int compress_user_cores;
extern int compress_user_cores_level;

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static int core_write(struct coredump_params *, const void *, size_t, off_t,
    enum uio_seg, size_t *);
static void each_dumpable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
    struct note_info_list *, size_t);
static void __elfN(prepare_notes)(struct thread *, struct note_info_list *,
    size_t *);
static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t);
static void __elfN(putnote)(struct note_info *, struct sbuf *);
static size_t register_note(struct note_info_list *, int, outfunc_t, void *);
static int sbuf_drain_core_output(void *, const char *, int);

static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *);
static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *);
static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *);
static void __elfN(note_ptlwpinfo)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
static void note_procstat_files(void *, struct sbuf *, size_t *);
static void note_procstat_groups(void *, struct sbuf *, size_t *);
static void note_procstat_osrel(void *, struct sbuf *, size_t *);
static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
static void note_procstat_umask(void *, struct sbuf *, size_t *);
static void note_procstat_vmmap(void *, struct sbuf *, size_t *);

/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len)
{
	u_int chunk_len;
	int error;

	while (len > 0) {
		chunk_len = MIN(len, CORE_BUF_SIZE);

		/*
		 * We can get an EFAULT error here.
		 * In that case zero out the current chunk of the segment.
		 */
		error = copyin(base, buf, chunk_len);
		if (error != 0)
			bzero(buf, chunk_len);
		error = compressor_write(p->comp, buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

static int
core_compressed_write(void *base, size_t len, off_t offset, void *arg)
{

	return (core_write((struct coredump_params *)arg, base, len, offset,
	    UIO_SYSSPACE, NULL));
}

static int
core_write(struct coredump_params *p, const void *base, size_t len,
    off_t offset, enum uio_seg seg, size_t *resid)
{

	return (vn_rdwr_inchunks(UIO_WRITE, p->vp, __DECONST(void *, base),
	    len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
	    p->active_cred, p->file_cred, resid, p->td));
}

static int
core_output(char *base, size_t len, off_t offset, struct coredump_params *p,
    void *tmpbuf)
{
	vm_map_t map;
	struct mount *mp;
	size_t resid, runlen;
	int error;
	bool success;

	KASSERT((uintptr_t)base % PAGE_SIZE == 0,
	    ("%s: user address %p is not page-aligned", __func__, base));

	if (p->comp != NULL)
		return (compress_chunk(p, base, tmpbuf, len));

	map = &p->td->td_proc->p_vmspace->vm_map;
	for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
		/*
		 * Attempt to page in all virtual pages in the range.  If a
		 * virtual page is not backed by the pager, it is represented as
		 * a hole in the file.  This can occur with zero-filled
		 * anonymous memory or truncated files, for example.
		 */
		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
			error = vm_fault(map, (uintptr_t)base + runlen,
			    VM_PROT_READ, VM_FAULT_NOFILL, NULL);
			if (runlen == 0)
				success = error == KERN_SUCCESS;
			else if ((error == KERN_SUCCESS) != success)
				break;
		}

		if (success) {
			error = core_write(p, base, runlen, offset,
			    UIO_USERSPACE, &resid);
			if (error != 0) {
				if (error != EFAULT)
					break;

				/*
				 * EFAULT may be returned if the user mapping
				 * could not be accessed, e.g., because a mapped
				 * file has been truncated.  Skip the page if no
				 * progress was made, to protect against a
				 * hypothetical scenario where vm_fault() was
				 * successful but core_write() returns EFAULT
				 * anyway.
				 */
				runlen -= resid;
				if (runlen == 0) {
					success = false;
					runlen = PAGE_SIZE;
				}
			}
		}
		if (!success) {
			error = vn_start_write(p->vp, &mp, V_WAIT);
			if (error != 0)
				break;
			vn_lock(p->vp, LK_EXCLUSIVE | LK_RETRY);
			error = vn_truncate_locked(p->vp, offset + runlen,
			    false, p->td->td_ucred);
			VOP_UNLOCK(p->vp);
			vn_finished_write(mp);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Drain into a core file.
 */
static int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *p;
	int error, locked;

	p = (struct coredump_params *)arg;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held.  Draining with the
	 * non-sleepable lock held is unsafe.  The lock is needed for
	 * those routines when dumping a live process.  In our case we
	 * can safely release the lock before draining and acquire
	 * it again after.
	 */
	locked = PROC_LOCKED(p->td->td_proc);
	if (locked)
		PROC_UNLOCK(p->td->td_proc);
	if (p->comp != NULL)
		error = compressor_write(p->comp, __DECONST(char *, data), len);
	else
		error = core_write(p, __DECONST(void *, data), len, p->offset,
		    UIO_SYSSPACE, NULL);
	if (locked)
		PROC_LOCK(p->td->td_proc);
	if (error != 0)
		return (-error);
	p->offset += len;
	return (len);
}

int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	struct note_info_list notelst;
	struct coredump_params params;
	struct note_info *ninfo;
	void *hdr, *tmpbuf;
	size_t hdrsize, notesz, coresize;

	hdr = NULL;
	tmpbuf = NULL;
	TAILQ_INIT(&notelst);

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_dumpable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Collect info about the core file header area.
	 */
	hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
	if (seginfo.count + 1 >= PN_XNUM)
		hdrsize += sizeof(Elf_Shdr);
	__elfN(prepare_notes)(td, &notelst, &notesz);
	coresize = round_page(hdrsize + notesz) + seginfo.size;

	/* Set up core dump parameters. */
	params.offset = 0;
	params.active_cred = cred;
	params.file_cred = NOCRED;
	params.td = td;
	params.vp = vp;
	params.comp = NULL;

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_add(td->td_proc, RACCT_CORE, coresize);
		PROC_UNLOCK(td->td_proc);
		if (error != 0) {
			error = EFAULT;
			goto done;
		}
	}
#endif
	if (coresize >= limit) {
		error = EFAULT;
		goto done;
	}

	/* Create a compression stream if necessary. */
	if (compress_user_cores != 0) {
		params.comp = compressor_init(core_compressed_write,
		    compress_user_cores, CORE_BUF_SIZE,
		    compress_user_cores_level, &params);
		if (params.comp == NULL) {
			error = EFAULT;
			goto done;
		}
		tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
	}

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out following the notes.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	error = __elfN(corehdr)(&params, seginfo.count, hdr, hdrsize, &notelst,
	    notesz);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = round_page(hdrsize + notesz);
		for (i = 0; i < seginfo.count; i++) {
			error = core_output((char *)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, &params, tmpbuf);
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
		if (error == 0 && params.comp != NULL)
			error = compressor_flush(params.comp);
	}
	if (error) {
		log(LOG_WARNING,
		    "Failed to write core file for process %s (error %d)\n",
		    curproc->p_comm, error);
	}

done:
	free(tmpbuf, M_TEMP);
	if (params.comp != NULL)
		compressor_fini(params.comp);
	while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
		TAILQ_REMOVE(&notelst, ninfo, link);
		free(ninfo, M_TEMP);
	}
	if (hdr != NULL)
		free(hdr, M_TEMP);

	return (error);
}

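/*
 * Resulting core file layout, for reference:
 *
 *	Elf_Ehdr
 *	Elf_Phdr 0		PT_NOTE, covering the notes below
 *	Elf_Phdr 1..count	PT_LOAD, one per dumped map entry
 *	(one Elf_Shdr		only when count + 1 >= PN_XNUM)
 *	notes			NT_PRPSINFO, per-thread NT_PRSTATUS, ...
 *	padding to a page boundary
 *	segment contents, in program header order
 */
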
		 */
		VM_OBJECT_RLOCK(object);
		while ((backing_object = object->backing_object) != NULL) {
			VM_OBJECT_RLOCK(backing_object);
			VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0;
		VM_OBJECT_RUNLOCK(object);
		if (ignore_entry)
			continue;

		(*func)(entry, closure);
	}
	vm_map_unlock_read(map);
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr,
    size_t hdrsize, struct note_info_list *notelst, size_t notesz)
{
	struct note_info *ninfo;
	struct sbuf *sb;
	int error;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	__elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz);
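
	/*
	 * The sbuf below drains through sbuf_drain_core_output(), so the
	 * header and the notes are streamed out to the vnode (or to the
	 * compressor) in CORE_BUF_SIZE chunks instead of being assembled
	 * in memory first.
	 */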

	sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
	sbuf_set_drain(sb, sbuf_drain_core_output, p);
	sbuf_start_section(sb, NULL);
	sbuf_bcat(sb, hdr, hdrsize);
	TAILQ_FOREACH(ninfo, notelst, link)
		__elfN(putnote)(ninfo, sb);
	/* Align up to a page boundary for the program segments. */
	sbuf_end_section(sb, -1, PAGE_SIZE, 0);
	error = sbuf_finish(sb);
	sbuf_delete(sb);

	return (error);
}

static void
__elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
    size_t *sizep)
{
	struct proc *p;
	struct thread *thr;
	size_t size;

	p = td->td_proc;
	size = 0;

	size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		size += register_note(list, NT_PRSTATUS,
		    __elfN(note_prstatus), thr);
		size += register_note(list, NT_FPREGSET,
		    __elfN(note_fpregset), thr);
		size += register_note(list, NT_THRMISC,
		    __elfN(note_thrmisc), thr);
		size += register_note(list, NT_PTLWPINFO,
		    __elfN(note_ptlwpinfo), thr);
		size += register_note(list, -1,
		    __elfN(note_threadmd), thr);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	size += register_note(list, NT_PROCSTAT_PROC,
	    __elfN(note_procstat_proc), p);
	size += register_note(list, NT_PROCSTAT_FILES,
	    note_procstat_files, p);
	size += register_note(list, NT_PROCSTAT_VMMAP,
	    note_procstat_vmmap, p);
	size += register_note(list, NT_PROCSTAT_GROUPS,
	    note_procstat_groups, p);
	size += register_note(list, NT_PROCSTAT_UMASK,
	    note_procstat_umask, p);
	size += register_note(list, NT_PROCSTAT_RLIMIT,
	    note_procstat_rlimit, p);
	size += register_note(list, NT_PROCSTAT_OSREL,
	    note_procstat_osrel, p);
	size += register_note(list, NT_PROCSTAT_PSSTRINGS,
	    __elfN(note_procstat_psstrings), p);
	size += register_note(list, NT_PROCSTAT_AUXV,
	    __elfN(note_procstat_auxv), p);

	*sizep = size;
}

static void
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
    size_t notesz)
{
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;

	ehdr = (Elf_Ehdr *)hdr;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = td->td_proc->p_elf_machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = td->td_proc->p_elf_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;

		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;
		KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
		    ("e_shoff: %zu, hdrsize - shdr: %zu",
		    (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
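
		/*
		 * For example, a core with 0x10000 dumpable segments sets
		 * e_phnum to PN_XNUM (0xffff) above, and the true count of
		 * 0x10001 program headers (the segments plus the PT_NOTE
		 * header) is then recovered from sh_info here.
		 */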
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = ELF_NOTE_ROUNDSIZE;
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = round_page(hdrsize + notesz);
	each_dumpable_segment(td, cb_put_phdr, &phc);
}

static size_t
register_note(struct note_info_list *list, int type, outfunc_t out, void *arg)
{
	struct note_info *ninfo;
	size_t size, notesize;

	size = 0;
	out(arg, NULL, &size);
	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = type;
	ninfo->outfunc = out;
	ninfo->outarg = arg;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	if (type == -1)
		return (size);

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}
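
/*
 * With the "FreeBSD" vendor string (FREEBSD_ABI_VENDOR, 8 bytes
 * including the terminating NUL), a note carrying a 60-byte payload
 * therefore occupies 12 (note header) + 8 (padded name) + 60 (payload,
 * already a multiple of ELF_NOTE_ROUNDSIZE) = 80 bytes in the file.
 */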

static size_t
append_note_data(const void *src, void *dst, size_t len)
{
	size_t padded_len;

	padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
	if (dst != NULL) {
		bcopy(src, dst, len);
		bzero((char *)dst + len, padded_len - len);
	}
	return (padded_len);
}

size_t
__elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
{
	Elf_Note *note;
	char *buf;
	size_t notesize;

	buf = dst;
	if (buf != NULL) {
		note = (Elf_Note *)buf;
		note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
		note->n_descsz = size;
		note->n_type = type;
		buf += sizeof(*note);
		buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
		    sizeof(FREEBSD_ABI_VENDOR));
		append_note_data(src, buf, size);
		if (descp != NULL)
			*descp = buf;
	}

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

static void
__elfN(putnote)(struct note_info *ninfo, struct sbuf *sb)
{
	Elf_Note note;
	ssize_t old_len, sect_len;
	size_t new_len, descsz, i;

	if (ninfo->type == -1) {
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
		return;
	}

	note.n_namesz = sizeof(FREEBSD_ABI_VENDOR);
	note.n_descsz = ninfo->outsize;
	note.n_type = ninfo->type;

	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR));
	sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (note.n_descsz == 0)
		return;
	sbuf_start_section(sb, &old_len);
	ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
	sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (sect_len < 0)
		return;

	new_len = (size_t)sect_len;
	descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
	if (new_len < descsz) {
		/*
		 * It is expected that individual note emitters will correctly
		 * predict their expected output size and fill up to that size
		 * themselves, padding in a format-specific way if needed.
		 * However, in case they don't, just do it here with zeros.
		 */
		for (i = 0; i < descsz - new_len; i++)
			sbuf_putc(sb, 0);
	} else if (new_len > descsz) {
		/*
		 * We can't always truncate sb -- we may have drained some
		 * of it already.
		 */
		KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
		    "read it (%zu > %zu).  Since it is longer than "
		    "expected, this coredump's notes are corrupt.  THIS "
		    "IS A BUG in the note_procstat routine for type %u.\n",
		    __func__, (unsigned)note.n_type, new_len, descsz,
		    (unsigned)note.n_type));
	}
}

/*
 * Miscellaneous note out functions.
 */
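
/*
 * Each emitter below follows the outfunc_t convention expected by
 * register_note(): called with sb == NULL it only reports its payload
 * size via *sizep, and called again with a live sbuf it must produce
 * exactly that many bytes.
 */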

#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_signal.h>

typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
typedef struct thrmisc32 elf_thrmisc_t;
#define	ELF_KERN_PROC_MASK	KERN_PROC_MASK32
typedef struct kinfo_proc32 elf_kinfo_proc_t;
typedef uint32_t elf_ps_strings_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
typedef thrmisc_t elf_thrmisc_t;
#define	ELF_KERN_PROC_MASK	0
typedef struct kinfo_proc elf_kinfo_proc_t;
typedef vm_offset_t elf_ps_strings_t;
#endif

static void
__elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct sbuf sbarg;
	size_t len;
	char *cp, *end;
	struct proc *p;
	elf_prpsinfo_t *psinfo;
	int error;

	p = (struct proc *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
		psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		PROC_LOCK(p);
		if (p->p_args != NULL) {
			len = sizeof(psinfo->pr_psargs) - 1;
			if (len > p->p_args->ar_length)
				len = p->p_args->ar_length;
			memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
			PROC_UNLOCK(p);
			error = 0;
		} else {
			_PHOLD(p);
			PROC_UNLOCK(p);
			sbuf_new(&sbarg, psinfo->pr_psargs,
			    sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
			error = proc_getargv(curthread, p, &sbarg);
			PRELE(p);
			if (sbuf_finish(&sbarg) == 0)
				len = sbuf_len(&sbarg) - 1;
			else
				len = sizeof(psinfo->pr_psargs) - 1;
			sbuf_delete(&sbarg);
		}
		if (error || len == 0)
			strlcpy(psinfo->pr_psargs, p->p_comm,
			    sizeof(psinfo->pr_psargs));
		else {
			KASSERT(len < sizeof(psinfo->pr_psargs),
			    ("len is too long: %zu vs %zu", len,
			    sizeof(psinfo->pr_psargs)));
			/*
			 * The fetched argument strings are NUL-separated;
			 * turn the separators into spaces so that pr_psargs
			 * reads as a single command line.
			 */
			cp = psinfo->pr_psargs;
			end = cp + len - 1;
			for (;;) {
				cp = memchr(cp, '\0', end - cp);
				if (cp == NULL)
					break;
				*cp = ' ';
			}
		}
		psinfo->pr_pid = p->p_pid;
		sbuf_bcat(sb, psinfo, sizeof(*psinfo));
		free(psinfo, M_TEMP);
	}
	*sizep = sizeof(*psinfo);
}

static void
__elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_prstatus_t *status;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*status), ("invalid size"));
		status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK);
		status->pr_version = PRSTATUS_VERSION;
		status->pr_statussz = sizeof(elf_prstatus_t);
		status->pr_gregsetsz = sizeof(elf_gregset_t);
		status->pr_fpregsetsz = sizeof(elf_fpregset_t);
		status->pr_osreldate = osreldate;
		status->pr_cursig = td->td_proc->p_sig;
		/*
		 * pr_pid carries the thread (LWP) ID, which is how the
		 * debugger matches each NT_PRSTATUS note to a thread.
		 */
		status->pr_pid = td->td_tid;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_regs32(td, &status->pr_reg);
#else
		fill_regs(td, &status->pr_reg);
#endif
		sbuf_bcat(sb, status, sizeof(*status));
		free(status, M_TEMP);
	}
	*sizep = sizeof(*status);
}

static void
__elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_prfpregset_t *fpregset;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*fpregset), ("invalid size"));
		fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_fpregs32(td, fpregset);
#else
		fill_fpregs(td, fpregset);
#endif
		sbuf_bcat(sb, fpregset, sizeof(*fpregset));
		free(fpregset, M_TEMP);
	}
	*sizep = sizeof(*fpregset);
}

static void
__elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_thrmisc_t thrmisc;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(thrmisc), ("invalid size"));
		bzero(&thrmisc, sizeof(thrmisc));
		strcpy(thrmisc.pr_tname, td->td_name);
		sbuf_bcat(sb, &thrmisc, sizeof(thrmisc));
	}
	*sizep = sizeof(thrmisc);
}

static void
__elfN(note_ptlwpinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	size_t size;
	int structsize;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
	struct ptrace_lwpinfo32 pl;
#else
	struct ptrace_lwpinfo pl;
#endif

	td = (struct thread *)arg;
	size = sizeof(structsize) + sizeof(pl);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(pl);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		bzero(&pl, sizeof(pl));
		pl.pl_lwpid = td->td_tid;
		pl.pl_event = PL_EVENT_NONE;
		pl.pl_sigmask = td->td_sigmask;
		pl.pl_siglist = td->td_siglist;
		if (td->td_si.si_signo != 0) {
			pl.pl_event = PL_EVENT_SIGNAL;
			pl.pl_flags |= PL_FLAG_SI;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
			siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo);
#else
			pl.pl_siginfo = td->td_si;
#endif
		}
		strcpy(pl.pl_tdname, td->td_name);
		/* XXX TODO: supply more information in struct ptrace_lwpinfo */
		sbuf_bcat(sb, &pl, sizeof(pl));
	}
	*sizep = size;
}

/*
 * Allow for MD specific notes, as well as any MD
 * specific preparations for writing MI notes.
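 *
 * The machine-dependent __elfN(dump_thread)() hook is likewise invoked
 * twice, following the outfunc_t protocol: once with a NULL buffer to
 * report how much space the MD notes need, then again to fill them in.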
2356 */ 2357 static void 2358 __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) 2359 { 2360 struct thread *td; 2361 void *buf; 2362 size_t size; 2363 2364 td = (struct thread *)arg; 2365 size = *sizep; 2366 if (size != 0 && sb != NULL) 2367 buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); 2368 else 2369 buf = NULL; 2370 size = 0; 2371 __elfN(dump_thread)(td, buf, &size); 2372 KASSERT(sb == NULL || *sizep == size, ("invalid size")); 2373 if (size != 0 && sb != NULL) 2374 sbuf_bcat(sb, buf, size); 2375 free(buf, M_TEMP); 2376 *sizep = size; 2377 } 2378 2379 #ifdef KINFO_PROC_SIZE 2380 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); 2381 #endif 2382 2383 static void 2384 __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) 2385 { 2386 struct proc *p; 2387 size_t size; 2388 int structsize; 2389 2390 p = (struct proc *)arg; 2391 size = sizeof(structsize) + p->p_numthreads * 2392 sizeof(elf_kinfo_proc_t); 2393 2394 if (sb != NULL) { 2395 KASSERT(*sizep == size, ("invalid size")); 2396 structsize = sizeof(elf_kinfo_proc_t); 2397 sbuf_bcat(sb, &structsize, sizeof(structsize)); 2398 sx_slock(&proctree_lock); 2399 PROC_LOCK(p); 2400 kern_proc_out(p, sb, ELF_KERN_PROC_MASK); 2401 sx_sunlock(&proctree_lock); 2402 } 2403 *sizep = size; 2404 } 2405 2406 #ifdef KINFO_FILE_SIZE 2407 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); 2408 #endif 2409 2410 static void 2411 note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) 2412 { 2413 struct proc *p; 2414 size_t size, sect_sz, i; 2415 ssize_t start_len, sect_len; 2416 int structsize, filedesc_flags; 2417 2418 if (coredump_pack_fileinfo) 2419 filedesc_flags = KERN_FILEDESC_PACK_KINFO; 2420 else 2421 filedesc_flags = 0; 2422 2423 p = (struct proc *)arg; 2424 structsize = sizeof(struct kinfo_file); 2425 if (sb == NULL) { 2426 size = 0; 2427 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); 2428 sbuf_set_drain(sb, sbuf_count_drain, &size); 2429 sbuf_bcat(sb, &structsize, sizeof(structsize)); 2430 PROC_LOCK(p); 2431 kern_proc_filedesc_out(p, sb, -1, filedesc_flags); 2432 sbuf_finish(sb); 2433 sbuf_delete(sb); 2434 *sizep = size; 2435 } else { 2436 sbuf_start_section(sb, &start_len); 2437 2438 sbuf_bcat(sb, &structsize, sizeof(structsize)); 2439 PROC_LOCK(p); 2440 kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), 2441 filedesc_flags); 2442 2443 sect_len = sbuf_end_section(sb, start_len, 0, 0); 2444 if (sect_len < 0) 2445 return; 2446 sect_sz = sect_len; 2447 2448 KASSERT(sect_sz <= *sizep, 2449 ("kern_proc_filedesc_out did not respect maxlen; " 2450 "requested %zu, got %zu", *sizep - sizeof(structsize), 2451 sect_sz - sizeof(structsize))); 2452 2453 for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) 2454 sbuf_putc(sb, 0); 2455 } 2456 } 2457 2458 #ifdef KINFO_VMENTRY_SIZE 2459 CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); 2460 #endif 2461 2462 static void 2463 note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) 2464 { 2465 struct proc *p; 2466 size_t size; 2467 int structsize, vmmap_flags; 2468 2469 if (coredump_pack_vmmapinfo) 2470 vmmap_flags = KERN_VMMAP_PACK_KINFO; 2471 else 2472 vmmap_flags = 0; 2473 2474 p = (struct proc *)arg; 2475 structsize = sizeof(struct kinfo_vmentry); 2476 if (sb == NULL) { 2477 size = 0; 2478 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); 2479 sbuf_set_drain(sb, sbuf_count_drain, &size); 2480 sbuf_bcat(sb, &structsize, sizeof(structsize)); 2481 PROC_LOCK(p); 2482 kern_proc_vmmap_out(p, sb, -1, vmmap_flags); 2483 sbuf_finish(sb); 2484 

static void
__elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + p->p_numthreads *
	    sizeof(elf_kinfo_proc_t);

	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(elf_kinfo_proc_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sx_slock(&proctree_lock);
		PROC_LOCK(p);
		kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
		sx_sunlock(&proctree_lock);
	}
	*sizep = size;
}

#ifdef KINFO_FILE_SIZE
CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
#endif

static void
note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size, sect_sz, i;
	ssize_t start_len, sect_len;
	int structsize, filedesc_flags;

	if (coredump_pack_fileinfo)
		filedesc_flags = KERN_FILEDESC_PACK_KINFO;
	else
		filedesc_flags = 0;

	p = (struct proc *)arg;
	structsize = sizeof(struct kinfo_file);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_start_section(sb, &start_len);

		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
		    filedesc_flags);

		sect_len = sbuf_end_section(sb, start_len, 0, 0);
		if (sect_len < 0)
			return;
		sect_sz = sect_len;

		KASSERT(sect_sz <= *sizep,
		    ("kern_proc_filedesc_out did not respect maxlen; "
		    "requested %zu, got %zu", *sizep - sizeof(structsize),
		    sect_sz - sizeof(structsize)));

		for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
			sbuf_putc(sb, 0);
	}
}

#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

static void
note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize, vmmap_flags;

	if (coredump_pack_vmmapinfo)
		vmmap_flags = KERN_VMMAP_PACK_KINFO;
	else
		vmmap_flags = 0;

	p = (struct proc *)arg;
	structsize = sizeof(struct kinfo_vmentry);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
		    vmmap_flags);
	}
}

static void
note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(gid_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
		    sizeof(gid_t));
	}
	*sizep = size;
}

static void
note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_pd->pd_cmask);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask));
	}
	*sizep = size;
}

static void
note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	struct rlimit rlim[RLIM_NLIMITS];
	size_t size;
	int structsize, i;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(rlim);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(rlim);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		for (i = 0; i < RLIM_NLIMITS; i++)
			lim_rlimit_proc(p, i, &rlim[i]);
		PROC_UNLOCK(p);
		sbuf_bcat(sb, rlim, sizeof(rlim));
	}
	*sizep = size;
}

static void
note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(p->p_osrel);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_osrel);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	elf_ps_strings_t ps_strings;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(ps_strings);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(ps_strings);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		ps_strings = PTROUT(p->p_sysent->sv_psstrings);
#else
		ps_strings = p->p_sysent->sv_psstrings;
#endif
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
	}
	*sizep = size;
}
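
/*
 * The auxv note sizes itself with a dry-run emission into an sbuf whose
 * drain function (sbuf_count_drain) merely counts bytes, since the
 * length of the auxiliary vector is not known up front.
 */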

static void
__elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	structsize = sizeof(Elf_Auxinfo);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
	}
}

static boolean_t
__elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote,
    const char *note_vendor, const Elf_Phdr *pnote,
    boolean_t (*cb)(const Elf_Note *, void *, boolean_t *), void *cb_arg)
{
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	char *buf;
	int i, error;
	boolean_t res;

	/* We need some limit, might as well use PAGE_SIZE. */
	if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
		return (FALSE);
	ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
	if (pnote->p_offset > PAGE_SIZE ||
	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
		buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
		if (buf == NULL) {
			VOP_UNLOCK(imgp->vp);
			buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
		error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
		    pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
		    curthread->td_ucred, NOCRED, NULL, curthread);
		if (error != 0) {
			uprintf("i/o error PT_NOTE\n");
			goto retf;
		}
		note = note0 = (const Elf_Note *)buf;
		note_end = (const Elf_Note *)(buf + pnote->p_filesz);
	} else {
		note = note0 = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset);
		note_end = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset + pnote->p_filesz);
		buf = NULL;
	}
	/*
	 * The iteration count is capped as an additional guard against
	 * malformed note chains that fail to advance.
	 */
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		if (!aligned(note, Elf32_Addr) || (const char *)note_end -
		    (const char *)note < sizeof(Elf_Note)) {
			goto retf;
		}
		if (note->n_namesz != checknote->n_namesz ||
		    note->n_descsz != checknote->n_descsz ||
		    note->n_type != checknote->n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (note_name + checknote->n_namesz >=
		    (const char *)note_end || strncmp(note_vendor,
		    note_name, checknote->n_namesz) != 0)
			goto nextnote;

		if (cb(note, cb_arg, &res))
			goto ret;
nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
		    roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
	}
retf:
	res = FALSE;
ret:
	free(buf, M_TEMP);
	return (res);
}

struct brandnote_cb_arg {
	Elf_Brandnote *brandnote;
	int32_t *osrel;
};

static boolean_t
brandnote_cb(const Elf_Note *note, void *arg0, boolean_t *res)
{
	struct brandnote_cb_arg *arg;

	arg = arg0;

	/*
	 * Fetch the osreldate for the binary from the ELF OSABI-note if
	 * necessary.
	 */
	*res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
	    arg->brandnote->trans_osrel != NULL ?
	    arg->brandnote->trans_osrel(note, arg->osrel) : TRUE;

	return (TRUE);
}

static Elf_Note fctl_note = {
	.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.n_descsz = sizeof(uint32_t),
	.n_type = NT_FREEBSD_FEATURE_CTL,
};

struct fctl_cb_arg {
	boolean_t *has_fctl0;
	uint32_t *fctl0;
};

static boolean_t
note_fctl_cb(const Elf_Note *note, void *arg0, boolean_t *res)
{
	struct fctl_cb_arg *arg;
	const Elf32_Word *desc;
	uintptr_t p;

	arg = arg0;
	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	desc = (const Elf32_Word *)p;
	*arg->has_fctl0 = TRUE;
	*arg->fctl0 = desc[0];
	return (TRUE);
}

/*
 * Try to find the appropriate ABI-note section for checknote, and fetch
 * the osreldate and feature control flags for the binary from the ELF
 * OSABI-note.  Only the first page of the image is searched, the same
 * as for headers.
 */
static boolean_t
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
    int32_t *osrel, boolean_t *has_fctl0, uint32_t *fctl0)
{
	const Elf_Phdr *phdr;
	const Elf_Ehdr *hdr;
	struct brandnote_cb_arg b_arg;
	struct fctl_cb_arg f_arg;
	int i, j;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	b_arg.brandnote = brandnote;
	b_arg.osrel = osrel;
	f_arg.has_fctl0 = has_fctl0;
	f_arg.fctl0 = fctl0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp,
		    &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb,
		    &b_arg)) {
			for (j = 0; j < hdr->e_phnum; j++) {
				if (phdr[j].p_type == PT_NOTE &&
				    __elfN(parse_notes)(imgp, &fctl_note,
				    FREEBSD_ABI_VENDOR, &phdr[j],
				    note_fctl_cb, &f_arg))
					break;
			}
			return (TRUE);
		}
	}
	return (FALSE);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
	.ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));

static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
{
	vm_prot_t prot;

	prot = 0;
	if (flags & PF_X)
		prot |= VM_PROT_EXECUTE;
	if (flags & PF_W)
		prot |= VM_PROT_WRITE;
	if (flags & PF_R)
		prot |= VM_PROT_READ;
#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
	if (i386_read_exec && (flags & PF_R))
		prot |= VM_PROT_EXECUTE;
#endif
	return (prot);
}

static Elf_Word
__elfN(untrans_prot)(vm_prot_t prot)
{
	Elf_Word flags;

	flags = 0;
	if (prot & VM_PROT_EXECUTE)
		flags |= PF_X;
	if (prot & VM_PROT_READ)
		flags |= PF_R;
	if (prot & VM_PROT_WRITE)
		flags |= PF_W;
	return (flags);
}
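
/*
 * Carve a random, pointer-aligned gap of at most aslr_stack_gap percent
 * of the stack size off the top of the stack, making the initial stack
 * pointer harder to predict even when the stack base itself is fixed.
 */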

void
__elfN(stackgap)(struct image_params *imgp, uintptr_t *stack_base)
{
	uintptr_t range, rbase, gap;
	int pct;

	pct = __elfN(aslr_stack_gap);
	if (pct == 0)
		return;
	if (pct > 50)
		pct = 50;
	range = imgp->eff_stack_sz * pct / 100;
	arc4rand(&rbase, sizeof(rbase), 0);
	gap = rbase % range;
	gap &= ~(sizeof(u_long) - 1);
	*stack_base -= gap;
}