/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017 Dell EMC
 * Copyright (c) 2000-2001, 2003 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/reg.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/eventhandler.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define ELF_NOTE_ROUNDSIZE	4
#define OLD_EI_BRAND		8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel, uint32_t *fctl0);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry);
static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
    int32_t *osrel);
static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static bool __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0,
    uint32_t *fctl0);
static vm_prot_t __elfN(trans_prot)(Elf_Word);
static Elf_Word __elfN(untrans_prot)(vm_prot_t);
static size_t __elfN(prepare_register_notes)(struct thread *td,
    struct note_info_list *list, struct thread *target_td);

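/*
 * Sysctl tree for this image activator; depending on the compile-time
 * __ELF_WORD_SIZE, the node below becomes kern.elf32 or kern.elf64.
 */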
SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0,
    "include all and only RW pages in core dumps");

int __elfN(nxstack) =
#if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
    (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
    defined(__riscv)
	1;
#else
	0;
#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");

#if defined(__amd64__)
static int __elfN(vdso) = 1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading");
#else
static int __elfN(vdso) = 0;
#endif

#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
int i386_read_exec = 0;
SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
    "enable execution from readable segments");
#endif

static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR;
static int
sysctl_pie_base(SYSCTL_HANDLER_ARGS)
{
        u_long val;
        int error;

        val = __elfN(pie_base);
        error = sysctl_handle_long(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if ((val & PAGE_MASK) != 0)
                return (EINVAL);
        __elfN(pie_base) = val;
        return (0);
}
SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_pie_base, "LU",
    "PIE load base without randomization");

SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");
#define ASLR_NODE_OID	__CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)

/*
 * Enable ASLR by default for 64-bit non-PIE binaries. 32-bit architectures
 * have limited address space (which can cause issues for applications with
 * high memory use) so we leave it off there.
 */
static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
    &__elfN(aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization");

/*
 * Enable ASLR by default for 64-bit PIE binaries.
 */
static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
    &__elfN(pie_aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization for PIE binaries");

/*
 * Sbrk is deprecated and it can be assumed that in most cases it will not be
 * used anyway. This setting is valid only with ASLR enabled, and allows ASLR
 * to use the bss grow region.
 */
static int __elfN(aslr_honor_sbrk) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
    &__elfN(aslr_honor_sbrk), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");

static int __elfN(aslr_stack) = 1;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN,
    &__elfN(aslr_stack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable stack address randomization");

static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN,
    &__elfN(aslr_shared_page), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable shared page address randomization");

static int __elfN(sigfastblock) = 1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
    CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
    "enable sigfastblock for new processes");

static bool __elfN(allow_wx) = true;
SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
    CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
    "Allow pages to be mapped simultaneously writable and executable");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

#define aligned(a, t)	(rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))

Elf_Brandnote __elfN(freebsd_brandnote) = {
        .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
        .hdr.n_descsz = sizeof(int32_t),
        .hdr.n_type = NT_FREEBSD_ABI_TAG,
        .vendor = FREEBSD_ABI_VENDOR,
        .flags = BN_TRANSLATE_OSREL,
        .trans_osrel = __elfN(freebsd_trans_osrel)
};

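/*
 * Translate the FreeBSD ABI-tag note into an osrel value: the 32-bit
 * __FreeBSD_version descriptor follows the note header and the padded
 * vendor name.
 */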
static bool
__elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
{
        uintptr_t p;

        p = (uintptr_t)(note + 1);
        p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
        *osrel = *(const int32_t *)(p);

        return (true);
}

static const char GNU_ABI_VENDOR[] = "GNU";
static int GNU_KFREEBSD_ABI_DESC = 3;

Elf_Brandnote __elfN(kfreebsd_brandnote) = {
        .hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
        .hdr.n_descsz = 16,	/* XXX at least 16 */
        .hdr.n_type = 1,
        .vendor = GNU_ABI_VENDOR,
        .flags = BN_TRANSLATE_OSREL,
        .trans_osrel = kfreebsd_trans_osrel
};

static bool
kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
{
        const Elf32_Word *desc;
        uintptr_t p;

        p = (uintptr_t)(note + 1);
        p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);

        desc = (const Elf32_Word *)p;
        if (desc[0] != GNU_KFREEBSD_ABI_DESC)
                return (false);

        /*
         * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
         * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
         */
        *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];

        return (true);
}

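/*
 * Helpers for the global brand table.  ABI modules (the Linuxulator,
 * for example) register an Elf_Brandinfo at load time and remove it at
 * unload time; a sketch of a hypothetical module might look like:
 *
 *	static Elf64_Brandinfo example_brand = {
 *		.brand = ELFOSABI_FREEBSD,
 *		.machine = EM_X86_64,
 *		... sysvec, interp_path, flags ...
 *	};
 *	elf64_insert_brand_entry(&example_brand);
 *
 * Insertion fails with -1 once all MAX_BRANDS slots are taken; removal
 * fails with -1 if the entry was never registered.
 */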
int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
        int i;

        for (i = 0; i < MAX_BRANDS; i++) {
                if (elf_brand_list[i] == NULL) {
                        elf_brand_list[i] = entry;
                        break;
                }
        }
        if (i == MAX_BRANDS) {
                printf("WARNING: %s: could not insert brandinfo entry: %p\n",
                    __func__, entry);
                return (-1);
        }
        return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
        int i;

        for (i = 0; i < MAX_BRANDS; i++) {
                if (elf_brand_list[i] == entry) {
                        elf_brand_list[i] = NULL;
                        break;
                }
        }
        if (i == MAX_BRANDS)
                return (-1);
        return (0);
}

bool
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
        struct proc *p;
        bool rval = false;

        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                if (p->p_sysent == entry->sysvec) {
                        rval = true;
                        break;
                }
        }
        sx_sunlock(&allproc_lock);

        return (rval);
}

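/*
 * Pick the brand (ABI personality) for the image.  The search is
 * ordered: ABI note sections first, then the EI_OSABI / old-style
 * header branding, then brands that recognize the header outright,
 * then a matching interpreter path, and finally the fallback brand
 * configured via the kern.elfNN.fallback_brand sysctl.
 */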
static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel, uint32_t *fctl0)
{
        const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
        Elf_Brandinfo *bi, *bi_m;
        bool ret, has_fctl0;
        int i, interp_name_len;

        interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;

        /*
         * We support four types of branding -- (1) the ELF EI_OSABI field
         * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
         * branding w/in the ELF header, (3) path of the `interp_path'
         * field, and (4) the ".note.ABI-tag" ELF section.
         */

        /* Look for an ".note.ABI-tag" ELF section */
        bi_m = NULL;
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi == NULL)
                        continue;
                if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
                        continue;
                if (hdr->e_machine == bi->machine && (bi->flags &
                    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
                        has_fctl0 = false;
                        *fctl0 = 0;
                        *osrel = 0;
                        ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
                            &has_fctl0, fctl0);
                        /* Give brand a chance to veto check_note's guess */
                        if (ret && bi->header_supported) {
                                ret = bi->header_supported(imgp, osrel,
                                    has_fctl0 ? fctl0 : NULL);
                        }
                        /*
                         * If note checker claimed the binary, but the
                         * interpreter path in the image does not
                         * match default one for the brand, try to
                         * search for other brands with the same
                         * interpreter. Either there is better brand
                         * with the right interpreter, or, failing
                         * this, we return first brand which accepted
                         * our note and, optionally, header.
                         */
                        if (ret && bi_m == NULL && interp != NULL &&
                            (bi->interp_path == NULL ||
                            (strlen(bi->interp_path) + 1 != interp_name_len ||
                            strncmp(interp, bi->interp_path, interp_name_len)
                            != 0))) {
                                bi_m = bi;
                                ret = 0;
                        }
                        if (ret)
                                return (bi);
                }
        }
        if (bi_m != NULL)
                return (bi_m);

        /* If the executable has a brand, search for it in the brand list. */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
                    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
                        continue;
                if (hdr->e_machine == bi->machine &&
                    (hdr->e_ident[EI_OSABI] == bi->brand ||
                    (bi->compat_3_brand != NULL &&
                    strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
                    bi->compat_3_brand) == 0))) {
                        /* Looks good, but give brand a chance to veto */
                        if (bi->header_supported == NULL ||
                            bi->header_supported(imgp, NULL, NULL)) {
                                /*
                                 * Again, prefer strictly matching
                                 * interpreter path.
                                 */
                                if (interp_name_len == 0 &&
                                    bi->interp_path == NULL)
                                        return (bi);
                                if (bi->interp_path != NULL &&
                                    strlen(bi->interp_path) + 1 ==
                                    interp_name_len && strncmp(interp,
                                    bi->interp_path, interp_name_len) == 0)
                                        return (bi);
                                if (bi_m == NULL)
                                        bi_m = bi;
                        }
                }
        }
        if (bi_m != NULL)
                return (bi_m);

        /* No known brand, see if the header is recognized by any brand */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
                    bi->header_supported == NULL)
                        continue;
                if (hdr->e_machine == bi->machine) {
                        ret = bi->header_supported(imgp, NULL, NULL);
                        if (ret)
                                return (bi);
                }
        }

        /* Lacking a known brand, search for a recognized interpreter. */
        if (interp != NULL) {
                for (i = 0; i < MAX_BRANDS; i++) {
                        bi = elf_brand_list[i];
                        if (bi == NULL || (bi->flags &
                            (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
                            != 0)
                                continue;
                        if (hdr->e_machine == bi->machine &&
                            bi->interp_path != NULL &&
                            /* ELF image p_filesz includes terminating zero */
                            strlen(bi->interp_path) + 1 == interp_name_len &&
                            strncmp(interp, bi->interp_path, interp_name_len)
                            == 0 && (bi->header_supported == NULL ||
                            bi->header_supported(imgp, NULL, NULL)))
                                return (bi);
                }
        }

        /* Lacking a recognized interpreter, try the default brand */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
                    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
                        continue;
                if (hdr->e_machine == bi->machine &&
                    __elfN(fallback_brand) == bi->brand &&
                    (bi->header_supported == NULL ||
                    bi->header_supported(imgp, NULL, NULL)))
                        return (bi);
        }
        return (NULL);
}

static bool
__elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
{
        return (hdr->e_phoff <= PAGE_SIZE &&
            (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff);
}

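/*
 * Sanity-check the ELF header: the correct class, byte order and
 * version for this activator, a program header entry size we
 * understand, and at least one registered brand for the target
 * machine.  Returns 0 or ENOEXEC.
 */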
static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
        Elf_Brandinfo *bi;
        int i;

        if (!IS_ELF(*hdr) ||
            hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
            hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
            hdr->e_ident[EI_VERSION] != EV_CURRENT ||
            hdr->e_phentsize != sizeof(Elf_Phdr) ||
            hdr->e_version != ELF_TARG_VER)
                return (ENOEXEC);

        /*
         * Make sure we have at least one brand for this machine.
         */

        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi != NULL && bi->machine == hdr->e_machine)
                        break;
        }
        if (i == MAX_BRANDS)
                return (ENOEXEC);

        return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
        struct sf_buf *sf;
        int error;
        vm_offset_t off;

        /*
         * Create the page if it doesn't exist yet. Ignore errors.
         */
        vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
            trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);

        /*
         * Find the page from the underlying object.
         */
        if (object != NULL) {
                sf = vm_imgact_map_page(object, offset);
                if (sf == NULL)
                        return (KERN_FAILURE);
                off = offset - trunc_page(offset);
                error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
                    end - start);
                vm_imgact_unmap_page(sf);
                if (error != 0)
                        return (KERN_FAILURE);
        }

        return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object,
    vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    int cow)
{
        struct sf_buf *sf;
        vm_offset_t off;
        vm_size_t sz;
        int error, locked, rv;

        if (start != trunc_page(start)) {
                rv = __elfN(map_partial)(map, object, offset, start,
                    round_page(start), prot);
                if (rv != KERN_SUCCESS)
                        return (rv);
                offset += round_page(start) - start;
                start = round_page(start);
        }
        if (end != round_page(end)) {
                rv = __elfN(map_partial)(map, object, offset +
                    trunc_page(end) - start, trunc_page(end), end, prot);
                if (rv != KERN_SUCCESS)
                        return (rv);
                end = trunc_page(end);
        }
        if (start >= end)
                return (KERN_SUCCESS);
        if ((offset & PAGE_MASK) != 0) {
                /*
                 * The mapping is not page aligned. This means that we have
                 * to copy the data.
                 */
                rv = vm_map_fixed(map, NULL, 0, start, end - start,
                    prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
                if (rv != KERN_SUCCESS)
                        return (rv);
                if (object == NULL)
                        return (KERN_SUCCESS);
                for (; start < end; start += sz) {
                        sf = vm_imgact_map_page(object, offset);
                        if (sf == NULL)
                                return (KERN_FAILURE);
                        off = offset - trunc_page(offset);
                        sz = end - start;
                        if (sz > PAGE_SIZE - off)
                                sz = PAGE_SIZE - off;
                        error = copyout((caddr_t)sf_buf_kva(sf) + off,
                            (caddr_t)start, sz);
                        vm_imgact_unmap_page(sf);
                        if (error != 0)
                                return (KERN_FAILURE);
                        offset += sz;
                }
        } else {
                vm_object_reference(object);
                rv = vm_map_fixed(map, object, offset, start, end - start,
                    prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
                    (object != NULL ? MAP_VN_EXEC : 0));
                if (rv != KERN_SUCCESS) {
                        locked = VOP_ISLOCKED(imgp->vp);
                        VOP_UNLOCK(imgp->vp);
                        vm_object_deallocate(object);
                        vn_lock(imgp->vp, locked | LK_RETRY);
                        return (rv);
                } else if (object != NULL) {
                        MPASS(imgp->vp->v_object == object);
                        VOP_SET_TEXT_CHECKED(imgp->vp);
                }
        }
        return (KERN_SUCCESS);
}

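/*
 * Map a single loadable segment into the process address space.  The
 * first filsz bytes are backed by the file object; the remainder, up
 * to memsz, becomes zero-filled anonymous memory (the bss).  Partial
 * pages at either end are copied rather than mapped.
 */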
static int
__elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
        struct sf_buf *sf;
        size_t map_len;
        vm_map_t map;
        vm_object_t object;
        vm_offset_t map_addr;
        int error, rv, cow;
        size_t copy_len;
        vm_ooffset_t file_addr;

        /*
         * It's necessary to fail if the filsz + offset taken from the
         * header is greater than the actual file pager object's size.
         * If we were to allow this, then the vm_map_find() below would
         * walk right off the end of the file object and into the ether.
         *
         * While I'm here, might as well check for something else that
         * is invalid: filsz cannot be greater than memsz.
         */
        if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
            filsz > memsz) {
                uprintf("elf_load_section: truncated ELF file\n");
                return (ENOEXEC);
        }

        object = imgp->object;
        map = &imgp->proc->p_vmspace->vm_map;
        map_addr = trunc_page((vm_offset_t)vmaddr);
        file_addr = trunc_page(offset);

        /*
         * We have two choices. We can either clear the data in the last page
         * of an oversized mapping, or we can start the anon mapping a page
         * early and copy the initialized data into that first page. We
         * choose the second.
         */
        if (filsz == 0)
                map_len = 0;
        else if (memsz > filsz)
                map_len = trunc_page(offset + filsz) - file_addr;
        else
                map_len = round_page(offset + filsz) - file_addr;

        if (map_len != 0) {
                /* cow flags: don't dump readonly sections in core */
                cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
                    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

                rv = __elfN(map_insert)(imgp, map, object, file_addr,
                    map_addr, map_addr + map_len, prot, cow);
                if (rv != KERN_SUCCESS)
                        return (EINVAL);

                /* we can stop now if we've covered it all */
                if (memsz == filsz)
                        return (0);
        }

        /*
         * We have to get the remaining bit of the file into the first part
         * of the oversized map segment. This is normally because the .data
         * segment in the file is extended to provide bss. It's a neat idea
         * to try and save a page, but it's a pain in the behind to implement.
         */
        copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
            filsz);
        map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
        map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

        /* This had damn well better be true! */
        if (map_len != 0) {
                rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
                    map_addr + map_len, prot, 0);
                if (rv != KERN_SUCCESS)
                        return (EINVAL);
        }

        if (copy_len != 0) {
                sf = vm_imgact_map_page(object, offset + filsz);
                if (sf == NULL)
                        return (EIO);

                /* send the page fragment to user space */
                error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr,
                    copy_len);
                vm_imgact_unmap_page(sf);
                if (error != 0)
                        return (error);
        }

        /*
         * Remove write access to the page if it was only granted by
         * map_insert to allow copyout.
         */
        if ((prot & VM_PROT_WRITE) == 0)
                vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
                    map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);

        return (0);
}

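/*
 * Walk the program header table and load every PT_LOAD segment,
 * relocated by rbase.  Optionally report the base address of the
 * first segment through base_addrp.
 */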
static int
__elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
{
        vm_prot_t prot;
        u_long base_addr;
        bool first;
        int error, i;

        ASSERT_VOP_LOCKED(imgp->vp, __func__);

        base_addr = 0;
        first = true;

        for (i = 0; i < hdr->e_phnum; i++) {
                if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
                        continue;

                /* Loadable segment */
                prot = __elfN(trans_prot)(phdr[i].p_flags);
                error = __elfN(load_section)(imgp, phdr[i].p_offset,
                    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
                    phdr[i].p_memsz, phdr[i].p_filesz, prot);
                if (error != 0)
                        return (error);

                /*
                 * Establish the base address if this is the first segment.
                 */
                if (first) {
                        base_addr = trunc_page(phdr[i].p_vaddr + rbase);
                        first = false;
                }
        }

        if (base_addrp != NULL)
                *base_addrp = base_addr;

        return (0);
}

/*
 * Load the file "file" into memory. It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out. On entry, it specifies
 * the address where a shared object should be loaded. If the file is
 * an executable, this value is ignored. On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only. On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry)
{
        struct {
                struct nameidata nd;
                struct vattr attr;
                struct image_params image_params;
        } *tempdata;
        const Elf_Ehdr *hdr = NULL;
        const Elf_Phdr *phdr = NULL;
        struct nameidata *nd;
        struct vattr *attr;
        struct image_params *imgp;
        u_long rbase;
        u_long base_addr = 0;
        int error;

#ifdef CAPABILITY_MODE
        /*
         * XXXJA: This check can go away once we are sufficiently confident
         * that the checks in namei() are correct.
         */
        if (IN_CAPABILITY_MODE(curthread))
                return (ECAPMODE);
#endif

        tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO);
        nd = &tempdata->nd;
        attr = &tempdata->attr;
        imgp = &tempdata->image_params;

        /*
         * Initialize part of the common data
         */
        imgp->proc = p;
        imgp->attr = attr;

        NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF,
            UIO_SYSSPACE, file);
        if ((error = namei(nd)) != 0) {
                nd->ni_vp = NULL;
                goto fail;
        }
        NDFREE_PNBUF(nd);
        imgp->vp = nd->ni_vp;

        /*
         * Check permissions, modes, uid, etc on the file, and "open" it.
         */
        error = exec_check_permissions(imgp);
        if (error)
                goto fail;

        error = exec_map_first_page(imgp);
        if (error)
                goto fail;

        imgp->object = nd->ni_vp->v_object;

        hdr = (const Elf_Ehdr *)imgp->image_header;
        if ((error = __elfN(check_header)(hdr)) != 0)
                goto fail;
        if (hdr->e_type == ET_DYN)
                rbase = *addr;
        else if (hdr->e_type == ET_EXEC)
                rbase = 0;
        else {
                error = ENOEXEC;
                goto fail;
        }

        /* Only support headers that fit within first page for now */
        if (!__elfN(phdr_in_zero_page)(hdr)) {
                error = ENOEXEC;
                goto fail;
        }

        phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
        if (!aligned(phdr, Elf_Addr)) {
                error = ENOEXEC;
                goto fail;
        }

        error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
        if (error != 0)
                goto fail;

        *addr = base_addr;
        *entry = (unsigned long)hdr->e_entry + rbase;

fail:
        if (imgp->firstpage)
                exec_unmap_first_page(imgp);

        if (nd->ni_vp) {
                if (imgp->textset)
                        VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
                vput(nd->ni_vp);
        }
        free(tempdata, M_TEMP);

        return (error);
}

/*
 * Select a randomized valid address in the map, between minv and
 * maxv, with the specified alignment. The [minv, maxv) range must
 * belong to the map. Note that the function only allocates the
 * address; it is up to the caller to clamp maxv so that the final
 * allocation length fits into the map.
 *
 * The result is returned in *resp; an error code indicates that the
 * arguments did not pass sanity checks for overflow and range
 * correctness.
 */
static int
__CONCAT(rnd_, __elfN(base))(vm_map_t map, u_long minv, u_long maxv,
    u_int align, u_long *resp)
{
        u_long rbase, res;

        MPASS(vm_map_min(map) <= minv);

        if (minv >= maxv || minv + align >= maxv || maxv > vm_map_max(map)) {
                uprintf("Invalid ELF segments layout\n");
                return (ENOEXEC);
        }

        arc4rand(&rbase, sizeof(rbase), 0);
        res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
        res &= ~((u_long)align - 1);
        if (res >= maxv)
                res -= align;

        KASSERT(res >= minv,
            ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
            res, minv, maxv, rbase));
        KASSERT(res < maxv,
            ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
            res, maxv, minv, rbase));

        *resp = res;
        return (0);
}

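/*
 * Derive the text and data segment sizes from the PT_LOAD headers and
 * check them against the process and system limits (RLIMIT_DATA,
 * maxtsiz, RLIMIT_VMEM and the racct equivalents) before committing
 * them to the vmspace.
 */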
static int
__elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long et_dyn_addr)
{
        struct vmspace *vmspace;
        const char *err_str;
        u_long text_size, data_size, total_size, text_addr, data_addr;
        u_long seg_size, seg_addr;
        int i;

        err_str = NULL;
        text_size = data_size = total_size = text_addr = data_addr = 0;

        for (i = 0; i < hdr->e_phnum; i++) {
                if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
                        continue;

                seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
                seg_size = round_page(phdr[i].p_memsz +
                    phdr[i].p_vaddr + et_dyn_addr - seg_addr);

                /*
                 * Make the largest executable segment the official
                 * text segment and all others data.
                 *
                 * Note that obreak() assumes that data_addr + data_size == end
                 * of data load area, and the ELF file format expects segments
                 * to be sorted by address. If multiple data segments exist,
                 * the last one will be used.
                 */

                if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
                        text_size = seg_size;
                        text_addr = seg_addr;
                } else {
                        data_size = seg_size;
                        data_addr = seg_addr;
                }
                total_size += seg_size;
        }

        if (data_addr == 0 && data_size == 0) {
                data_addr = text_addr;
                data_size = text_size;
        }

        /*
         * Check limits. It should be safe to check the
         * limits after loading the segments since we do
         * not actually fault in all of the segments' pages.
         */
        PROC_LOCK(imgp->proc);
        if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
                err_str = "Data segment size exceeds process limit";
        else if (text_size > maxtsiz)
                err_str = "Text segment size exceeds system limit";
        else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
                err_str = "Total segment size exceeds process limit";
        else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
                err_str = "Data segment size exceeds resource limit";
        else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
                err_str = "Total segment size exceeds resource limit";
        PROC_UNLOCK(imgp->proc);
        if (err_str != NULL) {
                uprintf("%s\n", err_str);
                return (ENOMEM);
        }

        vmspace = imgp->proc->p_vmspace;
        vmspace->vm_tsize = text_size >> PAGE_SHIFT;
        vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
        vmspace->vm_dsize = data_size >> PAGE_SHIFT;
        vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

        return (0);
}

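/*
 * Fetch the interpreter path named by a PT_INTERP header.  When the
 * path lies within the already-mapped first page it is used in place;
 * otherwise it is read with vn_rdwr() into a temporary buffer that the
 * caller must free (*free_interpp says which case applies).
 */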
static int
__elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
    char **interpp, bool *free_interpp)
{
        struct thread *td;
        char *interp;
        int error, interp_name_len;

        KASSERT(phdr->p_type == PT_INTERP,
            ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
        ASSERT_VOP_LOCKED(imgp->vp, __func__);

        td = curthread;

        /* Path to interpreter */
        if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
                uprintf("Invalid PT_INTERP\n");
                return (ENOEXEC);
        }

        interp_name_len = phdr->p_filesz;
        if (phdr->p_offset > PAGE_SIZE ||
            interp_name_len > PAGE_SIZE - phdr->p_offset) {
                /*
                 * The vnode lock might be needed by the pagedaemon to
                 * clean pages owned by the vnode. Do not allow sleep
                 * waiting for memory with the vnode locked, instead
                 * try non-sleepable allocation first, and if it
                 * fails, go to the slow path where we drop the lock
                 * and do M_WAITOK. A text reference prevents
                 * modifications to the vnode content.
                 */
                interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
                if (interp == NULL) {
                        VOP_UNLOCK(imgp->vp);
                        interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
                        vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                }

                error = vn_rdwr(UIO_READ, imgp->vp, interp,
                    interp_name_len, phdr->p_offset,
                    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
                    NOCRED, NULL, td);
                if (error != 0) {
                        free(interp, M_TEMP);
                        uprintf("i/o error PT_INTERP %d\n", error);
                        return (error);
                }
                interp[interp_name_len] = '\0';

                *interpp = interp;
                *free_interpp = true;
                return (0);
        }

        interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
        if (interp[interp_name_len - 1] != '\0') {
                uprintf("Invalid PT_INTERP\n");
                return (ENOEXEC);
        }

        *interpp = interp;
        *free_interpp = false;
        return (0);
}

static int
__elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
    const char *interp, u_long *addr, u_long *entry)
{
        char *path;
        int error;

        if (brand_info->emul_path != NULL &&
            brand_info->emul_path[0] != '\0') {
                path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
                snprintf(path, MAXPATHLEN, "%s%s",
                    brand_info->emul_path, interp);
                error = __elfN(load_file)(imgp->proc, path, addr, entry);
                free(path, M_TEMP);
                if (error == 0)
                        return (0);
        }

        if (brand_info->interp_newpath != NULL &&
            (brand_info->interp_path == NULL ||
            strcmp(interp, brand_info->interp_path) == 0)) {
                error = __elfN(load_file)(imgp->proc,
                    brand_info->interp_newpath, addr, entry);
                if (error == 0)
                        return (0);
        }

        error = __elfN(load_file)(imgp->proc, interp, addr, entry);
        if (error == 0)
                return (0);

        uprintf("ELF interpreter %s not found, error %d\n", interp, error);
        return (error);
}

/*
 * Impossible et_dyn_addr initial value indicating that the real base
 * must be calculated later with some randomization applied.
 */
#define ET_DYN_ADDR_RAND	1

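/*
 * The ELF image activator proper: validate the header, choose a
 * brand, set up the address space (including the ASLR and W^X
 * policies), load the segments and the interpreter, and construct the
 * auxargs vector.
 */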
static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
        struct thread *td;
        const Elf_Ehdr *hdr;
        const Elf_Phdr *phdr;
        Elf_Auxargs *elf_auxargs;
        struct vmspace *vmspace;
        vm_map_t map;
        char *interp;
        Elf_Brandinfo *brand_info;
        struct sysentvec *sv;
        u_long addr, baddr, et_dyn_addr, entry, proghdr;
        u_long maxalign, maxsalign, mapsz, maxv, maxv1, anon_loc;
        uint32_t fctl0;
        int32_t osrel;
        bool free_interp;
        int error, i, n;

        hdr = (const Elf_Ehdr *)imgp->image_header;

        /*
         * Do we have a valid ELF header?
         *
         * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
         * if the particular brand doesn't support it.
         */
        if (__elfN(check_header)(hdr) != 0 ||
            (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
                return (-1);

        /*
         * From here on down, we return an errno, not -1, as we've
         * detected an ELF file.
         */

        if (!__elfN(phdr_in_zero_page)(hdr)) {
                uprintf("Program headers not in the first page\n");
                return (ENOEXEC);
        }
        phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
        if (!aligned(phdr, Elf_Addr)) {
                uprintf("Unaligned program headers\n");
                return (ENOEXEC);
        }

        n = error = 0;
        baddr = 0;
        osrel = 0;
        fctl0 = 0;
        entry = proghdr = 0;
        interp = NULL;
        free_interp = false;
        td = curthread;

        /*
         * Somewhat arbitrary, limit accepted max alignment for the
         * loadable segment to the max supported superpage size. Too
         * large alignment requests are not useful and are indicators
         * of a corrupted or outright malicious binary.
         */
        maxalign = PAGE_SIZE;
        maxsalign = PAGE_SIZE * 1024;
        for (i = MAXPAGESIZES - 1; i > 0; i--) {
                if (pagesizes[i] > maxsalign)
                        maxsalign = pagesizes[i];
        }

        mapsz = 0;

        for (i = 0; i < hdr->e_phnum; i++) {
                switch (phdr[i].p_type) {
                case PT_LOAD:
                        if (n == 0)
                                baddr = phdr[i].p_vaddr;
                        if (!powerof2(phdr[i].p_align) ||
                            phdr[i].p_align > maxsalign) {
                                uprintf("Invalid segment alignment\n");
                                error = ENOEXEC;
                                goto ret;
                        }
                        if (phdr[i].p_align > maxalign)
                                maxalign = phdr[i].p_align;
                        if (mapsz + phdr[i].p_memsz < mapsz) {
                                uprintf("Mapsize overflow\n");
                                error = ENOEXEC;
                                goto ret;
                        }
                        mapsz += phdr[i].p_memsz;
                        n++;

                        /*
                         * If this segment contains the program headers,
                         * remember their virtual address for the AT_PHDR
                         * aux entry. Static binaries don't usually include
                         * a PT_PHDR entry.
                         */
                        if (phdr[i].p_offset == 0 &&
                            hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <=
                            phdr[i].p_filesz)
                                proghdr = phdr[i].p_vaddr + hdr->e_phoff;
                        break;
                case PT_INTERP:
                        /* Path to interpreter */
                        if (interp != NULL) {
                                uprintf("Multiple PT_INTERP headers\n");
                                error = ENOEXEC;
                                goto ret;
                        }
                        error = __elfN(get_interp)(imgp, &phdr[i], &interp,
                            &free_interp);
                        if (error != 0)
                                goto ret;
                        break;
                case PT_GNU_STACK:
                        if (__elfN(nxstack)) {
                                imgp->stack_prot =
                                    __elfN(trans_prot)(phdr[i].p_flags);
                                if ((imgp->stack_prot & VM_PROT_RW) !=
                                    VM_PROT_RW) {
                                        uprintf("Invalid PT_GNU_STACK\n");
                                        error = ENOEXEC;
                                        goto ret;
                                }
                        }
                        imgp->stack_sz = phdr[i].p_memsz;
                        break;
                case PT_PHDR:	/* Program header table info */
                        proghdr = phdr[i].p_vaddr;
                        break;
                }
        }

        brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
        if (brand_info == NULL) {
                uprintf("ELF binary type \"%u\" not known.\n",
                    hdr->e_ident[EI_OSABI]);
                error = ENOEXEC;
                goto ret;
        }
        sv = brand_info->sysvec;
        et_dyn_addr = 0;
        if (hdr->e_type == ET_DYN) {
                if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
                        uprintf("Cannot execute shared object\n");
                        error = ENOEXEC;
                        goto ret;
                }
                /*
                 * Honour the base load address from the dso if it is
                 * non-zero for some reason.
                 */
                if (baddr == 0) {
                        if ((sv->sv_flags & SV_ASLR) == 0 ||
                            (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
                                et_dyn_addr = __elfN(pie_base);
                        else if ((__elfN(pie_aslr_enabled) &&
                            (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
                            (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
                                et_dyn_addr = ET_DYN_ADDR_RAND;
                        else
                                et_dyn_addr = __elfN(pie_base);
                }
        }

        /*
         * Avoid a possible deadlock if the current address space is destroyed
         * and that address space maps the locked vnode. In the common case,
         * the locked vnode's v_usecount is decremented but remains greater
         * than zero. Consequently, the vnode lock is not needed by vrele().
         * However, in cases where the vnode lock is external, such as nullfs,
         * v_usecount may become zero.
         *
         * The VV_TEXT flag prevents modifications to the executable while
         * the vnode is unlocked.
         */
        VOP_UNLOCK(imgp->vp);

        /*
         * Decide whether to enable randomization of user mappings.
         * First, reset user preferences for the setid binaries.
         * Then, account for the support of the randomization by the
         * ABI, by user preferences, and give special treatment to
         * PIE binaries.
         */
        if (imgp->credential_setid) {
                PROC_LOCK(imgp->proc);
                imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE |
                    P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC);
                PROC_UNLOCK(imgp->proc);
        }
        if ((sv->sv_flags & SV_ASLR) == 0 ||
            (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
            (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
                KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
                    ("et_dyn_addr == RAND and !ASLR"));
        } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
            (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
            et_dyn_addr == ET_DYN_ADDR_RAND) {
                imgp->map_flags |= MAP_ASLR;
                /*
                 * If the user does not care about sbrk, utilize the bss
                 * grow region for mappings as well. We can select
                 * the base for the image anywhere and still not suffer
                 * from fragmentation.
                 */
                if (!__elfN(aslr_honor_sbrk) ||
                    (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
                        imgp->map_flags |= MAP_ASLR_IGNSTART;
                if (__elfN(aslr_stack))
                        imgp->map_flags |= MAP_ASLR_STACK;
                if (__elfN(aslr_shared_page))
                        imgp->imgp_flags |= IMGP_ASLR_SHARED_PAGE;
        }

        if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 &&
            (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) ||
            (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
                imgp->map_flags |= MAP_WXORX;

        error = exec_new_vmspace(imgp, sv);

        imgp->proc->p_sysent = sv;
        imgp->proc->p_elf_brandinfo = brand_info;

        vmspace = imgp->proc->p_vmspace;
        map = &vmspace->vm_map;
        maxv = sv->sv_usrstack;
        if ((imgp->map_flags & MAP_ASLR_STACK) == 0)
                maxv -= lim_max(td, RLIMIT_STACK);
        if (error == 0 && mapsz >= maxv - vm_map_min(map)) {
                uprintf("Excessive mapping size\n");
                error = ENOEXEC;
        }

        if (error == 0 && et_dyn_addr == ET_DYN_ADDR_RAND) {
                KASSERT((map->flags & MAP_ASLR) != 0,
                    ("ET_DYN_ADDR_RAND but !MAP_ASLR"));
                error = __CONCAT(rnd_, __elfN(base))(map,
                    vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
                    /* reserve half of the address space to interpreter */
                    maxv / 2, maxalign, &et_dyn_addr);
        }

        vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
        if (error != 0)
                goto ret;

        error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL);
        if (error != 0)
                goto ret;

        error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr);
        if (error != 0)
                goto ret;

        /*
         * We load the dynamic linker where a userland call
         * to mmap(0, ...) would put it. The rationale behind this
         * calculation is that it leaves room for the heap to grow to
         * its maximum allowed size.
         */
        addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td,
            RLIMIT_DATA));
        if ((map->flags & MAP_ASLR) != 0) {
                maxv1 = maxv / 2 + addr / 2;
                error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1,
                    (MAXPAGESIZES > 1 && pagesizes[1] != 0) ?
                    pagesizes[1] : pagesizes[0], &anon_loc);
                if (error != 0)
                        goto ret;
                map->anon_loc = anon_loc;
        } else {
                map->anon_loc = addr;
        }

        entry = (u_long)hdr->e_entry + et_dyn_addr;
        imgp->entry_addr = entry;

        if (interp != NULL) {
                VOP_UNLOCK(imgp->vp);
                if ((map->flags & MAP_ASLR) != 0) {
                        /* Assume that interpreter fits into 1/4 of AS */
                        maxv1 = maxv / 2 + addr / 2;
                        error = __CONCAT(rnd_, __elfN(base))(map, addr,
                            maxv1, PAGE_SIZE, &addr);
                }
                if (error == 0) {
                        error = __elfN(load_interp)(imgp, brand_info, interp,
                            &addr, &imgp->entry_addr);
                }
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                if (error != 0)
                        goto ret;
        } else
                addr = et_dyn_addr;

        error = exec_map_stack(imgp);
        if (error != 0)
                goto ret;

        /*
         * Construct auxargs table (used by the copyout_auxargs routine)
         */
        elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT);
        if (elf_auxargs == NULL) {
                VOP_UNLOCK(imgp->vp);
                elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
        }
        elf_auxargs->execfd = -1;
        elf_auxargs->phdr = proghdr + et_dyn_addr;
        elf_auxargs->phent = hdr->e_phentsize;
        elf_auxargs->phnum = hdr->e_phnum;
        elf_auxargs->pagesz = PAGE_SIZE;
        elf_auxargs->base = addr;
        elf_auxargs->flags = 0;
        elf_auxargs->entry = entry;
        elf_auxargs->hdr_eflags = hdr->e_flags;

        imgp->auxargs = elf_auxargs;
        imgp->interpreted = 0;
        imgp->reloc_base = addr;
        imgp->proc->p_osrel = osrel;
        imgp->proc->p_fctl0 = fctl0;
        imgp->proc->p_elf_flags = hdr->e_flags;

ret:
        ASSERT_VOP_LOCKED(imgp->vp, "skipped relock");
        if (free_interp)
                free(interp, M_TEMP);
        return (error);
}

#define elf_suword	__CONCAT(suword, __ELF_WORD_SIZE)

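/*
 * Write the ELF auxiliary vector (the AT_* entries consumed by the
 * runtime linker and libc) to the address 'base' in the new process
 * image.  The table is assembled in a kernel buffer first so that a
 * single copyout() suffices.
 */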
int
__elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base)
{
        Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
        Elf_Auxinfo *argarray, *pos;
        struct vmspace *vmspace;
        rlim_t stacksz;
        int error, bsdflags, oc;

        argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP,
            M_WAITOK | M_ZERO);

        vmspace = imgp->proc->p_vmspace;

        if (args->execfd != -1)
                AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
        AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
        AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
        AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
        AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
        AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
        AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
        AUXARGS_ENTRY(pos, AT_BASE, args->base);
        AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags);
        if (imgp->execpathp != 0)
                AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp);
        AUXARGS_ENTRY(pos, AT_OSRELDATE,
            imgp->proc->p_ucred->cr_prison->pr_osreldate);
        if (imgp->canary != 0) {
                AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary);
                AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
        }
        AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
        if (imgp->pagesizes != 0) {
                AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes);
                AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
        }
        if ((imgp->sysent->sv_flags & SV_TIMEKEEP) != 0) {
                AUXARGS_ENTRY(pos, AT_TIMEKEEP,
                    vmspace->vm_shp_base + imgp->sysent->sv_timekeep_offset);
        }
        AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
            != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
            imgp->sysent->sv_stackprot);
        if (imgp->sysent->sv_hwcap != NULL)
                AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap);
        if (imgp->sysent->sv_hwcap2 != NULL)
                AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2);
        bsdflags = 0;
        bsdflags |= __elfN(sigfastblock) ? ELF_BSDF_SIGFASTBLK : 0;
        oc = atomic_load_int(&vm_overcommit);
        bsdflags |= (oc & (SWAP_RESERVE_FORCE_ON | SWAP_RESERVE_RLIMIT_ON)) !=
            0 ? ELF_BSDF_VMNOOVERCOMMIT : 0;
        AUXARGS_ENTRY(pos, AT_BSDFLAGS, bsdflags);
        AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc);
        AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv);
        AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc);
        AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv);
        AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings);
#ifdef RANDOM_FENESTRASX
        if ((imgp->sysent->sv_flags & SV_RNG_SEED_VER) != 0) {
                AUXARGS_ENTRY(pos, AT_FXRNG,
                    vmspace->vm_shp_base + imgp->sysent->sv_fxrng_gen_offset);
        }
#endif
        if ((imgp->sysent->sv_flags & SV_DSO_SIG) != 0 && __elfN(vdso) != 0) {
                AUXARGS_ENTRY(pos, AT_KPRELOAD,
                    vmspace->vm_shp_base + imgp->sysent->sv_vdso_offset);
        }
        AUXARGS_ENTRY(pos, AT_USRSTACKBASE, round_page(vmspace->vm_stacktop));
        stacksz = imgp->proc->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur;
        AUXARGS_ENTRY(pos, AT_USRSTACKLIM, stacksz);
        AUXARGS_ENTRY(pos, AT_NULL, 0);

        free(imgp->auxargs, M_TEMP);
        imgp->auxargs = NULL;
        KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs"));

        error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT);
        free(argarray, M_TEMP);
        return (error);
}

int
__elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp)
{
        Elf_Addr *base;

        base = (Elf_Addr *)*stack_base;
        base--;
        if (elf_suword(base, imgp->args->argc) == -1)
                return (EFAULT);
        *stack_base = (uintptr_t)base;
        return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
        Elf_Phdr *phdr;		/* Program header to fill in */
        Elf_Off offset;		/* Offset of segment in core file */
};

struct note_info {
        int type;		/* Note type. */
        struct regset *regset;	/* Register set. */
        outfunc_t outfunc;	/* Output function. */
        void *outarg;		/* Argument for the output function. */
        size_t outsize;		/* Output size. */
        TAILQ_ENTRY(note_info) link;	/* Link to the next note info. */
};

TAILQ_HEAD(note_info_list, note_info);

extern int compress_user_cores;
extern int compress_user_cores_level;

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_dumpable_segment(struct thread *, segment_callback, void *,
    int);
static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
    struct note_info_list *, size_t, int);
static void __elfN(putnote)(struct thread *td, struct note_info *,
    struct sbuf *);

static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
static void note_procstat_files(void *, struct sbuf *, size_t *);
static void note_procstat_groups(void *, struct sbuf *, size_t *);
static void note_procstat_osrel(void *, struct sbuf *, size_t *);
static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
static void note_procstat_umask(void *, struct sbuf *, size_t *);
static void note_procstat_vmmap(void *, struct sbuf *, size_t *);

static int
core_compressed_write(void *base, size_t len, off_t offset, void *arg)
{

        return (core_write((struct coredump_params *)arg, base, len, offset,
            UIO_SYSSPACE, NULL));
}

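/*
 * Write an ELF core dump of the calling thread's process to the vnode
 * vp: the executable header, a PT_NOTE segment describing process and
 * thread state, then the dumpable memory segments, optionally routed
 * through a compressor.
 */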
int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
{
        struct ucred *cred = td->td_ucred;
        int compm, error = 0;
        struct sseg_closure seginfo;
        struct note_info_list notelst;
        struct coredump_params params;
        struct note_info *ninfo;
        void *hdr, *tmpbuf;
        size_t hdrsize, notesz, coresize;

        hdr = NULL;
        tmpbuf = NULL;
        TAILQ_INIT(&notelst);

        /* Size the program segments. */
        __elfN(size_segments)(td, &seginfo, flags);

        /*
         * Collect info about the core file header area.
         */
        hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
        if (seginfo.count + 1 >= PN_XNUM)
                hdrsize += sizeof(Elf_Shdr);
        td->td_proc->p_sysent->sv_elf_core_prepare_notes(td, &notelst,
            &notesz);
        coresize = round_page(hdrsize + notesz) + seginfo.size;

        /* Set up core dump parameters. */
        params.offset = 0;
        params.active_cred = cred;
        params.file_cred = NOCRED;
        params.td = td;
        params.vp = vp;
        params.comp = NULL;

#ifdef RACCT
        if (racct_enable) {
                PROC_LOCK(td->td_proc);
                error = racct_add(td->td_proc, RACCT_CORE, coresize);
                PROC_UNLOCK(td->td_proc);
                if (error != 0) {
                        error = EFAULT;
                        goto done;
                }
        }
#endif
        if (coresize >= limit) {
                error = EFAULT;
                goto done;
        }

        /* Create a compression stream if necessary. */
        compm = compress_user_cores;
        if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP &&
            compm == 0)
                compm = COMPRESS_GZIP;
        if (compm != 0) {
                params.comp = compressor_init(core_compressed_write,
                    compm, CORE_BUF_SIZE,
                    compress_user_cores_level, &params);
                if (params.comp == NULL) {
                        error = EFAULT;
                        goto done;
                }
                tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
        }

        /*
         * Allocate memory for building the header, fill it up,
         * and write it out following the notes.
         */
        hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
        error = __elfN(corehdr)(&params, seginfo.count, hdr, hdrsize, &notelst,
            notesz, flags);

        /* Write the contents of all of the writable segments. */
        if (error == 0) {
                Elf_Phdr *php;
                off_t offset;
                int i;

                php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
                offset = round_page(hdrsize + notesz);
                for (i = 0; i < seginfo.count; i++) {
                        error = core_output((char *)(uintptr_t)php->p_vaddr,
                            php->p_filesz, offset, &params, tmpbuf);
                        if (error != 0)
                                break;
                        offset += php->p_filesz;
                        php++;
                }
                if (error == 0 && params.comp != NULL)
                        error = compressor_flush(params.comp);
        }
        if (error) {
                log(LOG_WARNING,
                    "Failed to write core file for process %s (error %d)\n",
                    curproc->p_comm, error);
        }

done:
        free(tmpbuf, M_TEMP);
        if (params.comp != NULL)
                compressor_fini(params.comp);
        while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
                TAILQ_REMOVE(&notelst, ninfo, link);
                free(ninfo, M_TEMP);
        }
        if (hdr != NULL)
                free(hdr, M_TEMP);

        return (error);
}

/*
 * A callback for each_dumpable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
        struct phdr_closure *phc = (struct phdr_closure *)closure;
        Elf_Phdr *phdr = phc->phdr;

        phc->offset = round_page(phc->offset);

        phdr->p_type = PT_LOAD;
        phdr->p_offset = phc->offset;
        phdr->p_vaddr = entry->start;
        phdr->p_paddr = 0;
        phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
        phdr->p_align = PAGE_SIZE;
        phdr->p_flags = __elfN(untrans_prot)(entry->protection);

        phc->offset += phdr->p_filesz;
        phc->phdr++;
}

/*
 * A callback for each_dumpable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(vm_map_entry_t entry, void *closure)
{
        struct sseg_closure *ssc = (struct sseg_closure *)closure;

        ssc->count++;
        ssc->size += entry->end - entry->start;
}

void
__elfN(size_segments)(struct thread *td, struct sseg_closure *seginfo,
    int flags)
{
        seginfo->count = 0;
        seginfo->size = 0;

        each_dumpable_segment(td, cb_size_segment, seginfo, flags);
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_dumpable_segment(struct thread *td, segment_callback func, void *closure,
    int flags)
{
        struct proc *p = td->td_proc;
        vm_map_t map = &p->p_vmspace->vm_map;
        vm_map_entry_t entry;
        vm_object_t backing_object, object;
        bool ignore_entry;

        vm_map_lock_read(map);
        VM_MAP_ENTRY_FOREACH(entry, map) {
                /*
                 * Don't dump inaccessible mappings, deal with legacy
                 * coredump mode.
                 *
                 * Note that read-only segments related to the elf binary
                 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
                 * need to arbitrarily ignore such segments.
                 */
                if ((flags & SVC_ALL) == 0) {
                        if (elf_legacy_coredump) {
                                if ((entry->protection & VM_PROT_RW) !=
                                    VM_PROT_RW)
                                        continue;
                        } else {
                                if ((entry->protection & VM_PROT_ALL) == 0)
                                        continue;
                        }
                }

                /*
                 * Don't include a memory segment in the coredump if
                 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
                 * madvise(2). Do not dump submaps (i.e. parts of the
                 * kernel map).
                 */
                if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
                        continue;
                if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 &&
                    (flags & SVC_ALL) == 0)
                        continue;
                if ((object = entry->object.vm_object) == NULL)
                        continue;

                /* Ignore memory-mapped devices and such things. */
                VM_OBJECT_RLOCK(object);
                while ((backing_object = object->backing_object) != NULL) {
                        VM_OBJECT_RLOCK(backing_object);
                        VM_OBJECT_RUNLOCK(object);
                        object = backing_object;
                }
                ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0;
                VM_OBJECT_RUNLOCK(object);
                if (ignore_entry)
                        continue;

                (*func)(entry, closure);
        }
        vm_map_unlock_read(map);
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr,
    size_t hdrsize, struct note_info_list *notelst, size_t notesz,
    int flags)
{
        struct note_info *ninfo;
        struct sbuf *sb;
        int error;

        /* Fill in the header. */
        bzero(hdr, hdrsize);
        __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags);

        sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
        sbuf_set_drain(sb, sbuf_drain_core_output, p);
        sbuf_start_section(sb, NULL);
        sbuf_bcat(sb, hdr, hdrsize);
        TAILQ_FOREACH(ninfo, notelst, link)
                __elfN(putnote)(p->td, ninfo, sb);
        /* Align up to a page boundary for the program segments. */
        sbuf_end_section(sb, -1, PAGE_SIZE, 0);
        error = sbuf_finish(sb);
        sbuf_delete(sb);

        return (error);
}

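/*
 * Register every note that will appear in the core dump's PT_NOTE
 * segment, per process and per thread, and report the total note area
 * size through sizep.
 */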
void
__elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
    size_t *sizep)
{
	struct proc *p;
	struct thread *thr;
	size_t size;

	p = td->td_proc;
	size = 0;

	size += __elfN(register_note)(td, list, NT_PRPSINFO,
	    __elfN(note_prpsinfo), p);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		size += __elfN(prepare_register_notes)(td, list, thr);
		size += __elfN(register_note)(td, list, -1,
		    __elfN(note_threadmd), thr);

		thr = thr == td ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	size += __elfN(register_note)(td, list, NT_PROCSTAT_PROC,
	    __elfN(note_procstat_proc), p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_FILES,
	    note_procstat_files, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_VMMAP,
	    note_procstat_vmmap, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_GROUPS,
	    note_procstat_groups, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_UMASK,
	    note_procstat_umask, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_RLIMIT,
	    note_procstat_rlimit, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_OSREL,
	    note_procstat_osrel, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_PSSTRINGS,
	    __elfN(note_procstat_psstrings), p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_AUXV,
	    __elfN(note_procstat_auxv), p);

	*sizep = size;
}

void
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
    size_t notesz, int flags)
{
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;
	Elf_Brandinfo *bi;

	ehdr = (Elf_Ehdr *)hdr;
	bi = td->td_proc->p_elf_brandinfo;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = td->td_proc->p_sysent->sv_elf_core_osabi;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = bi->machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = td->td_proc->p_elf_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;

		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;
		KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
		    ("e_shoff: %zu, hdrsize - shdr: %zu",
		    (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
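	 *
	 * The PT_NOTE entry comes first, followed by one PT_LOAD entry per
	 * dumpable segment, in the order in which each_dumpable_segment()
	 * visits the map entries; cb_put_phdr() fills in each PT_LOAD.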
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = ELF_NOTE_ROUNDSIZE;
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = round_page(hdrsize + notesz);
	each_dumpable_segment(td, cb_put_phdr, &phc, flags);
}

static size_t
__elfN(register_regset_note)(struct thread *td, struct note_info_list *list,
    struct regset *regset, struct thread *target_td)
{
	const struct sysentvec *sv;
	struct note_info *ninfo;
	size_t size, notesize;

	size = 0;
	if (!regset->get(regset, target_td, NULL, &size) || size == 0)
		return (0);

	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = regset->note;
	ninfo->regset = regset;
	ninfo->outarg = target_td;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	sv = td->td_proc->p_sysent;
	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

size_t
__elfN(register_note)(struct thread *td, struct note_info_list *list,
    int type, outfunc_t out, void *arg)
{
	const struct sysentvec *sv;
	struct note_info *ninfo;
	size_t size, notesize;

	sv = td->td_proc->p_sysent;
	size = 0;
	out(arg, NULL, &size);
	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = type;
	ninfo->outfunc = out;
	ninfo->outarg = arg;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	if (type == -1)
		return (size);

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

static size_t
append_note_data(const void *src, void *dst, size_t len)
{
	size_t padded_len;

	padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
	if (dst != NULL) {
		bcopy(src, dst, len);
		bzero((char *)dst + len, padded_len - len);
	}
	return (padded_len);
}

size_t
__elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
{
	Elf_Note *note;
	char *buf;
	size_t notesize;

	buf = dst;
	if (buf != NULL) {
		note = (Elf_Note *)buf;
		note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
		note->n_descsz = size;
		note->n_type = type;
		buf += sizeof(*note);
		buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
		    sizeof(FREEBSD_ABI_VENDOR));
		append_note_data(src, buf, size);
		if (descp != NULL)
			*descp = buf;
	}

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

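/*
 * Emit one registered note to the output sbuf.  Each note record has
 * the layout:
 *
 *	Elf_Note header (n_namesz, n_descsz, n_type)
 *	name: n_namesz bytes, zero-padded to ELF_NOTE_ROUNDSIZE
 *	desc: n_descsz bytes, zero-padded to ELF_NOTE_ROUNDSIZE
 *
 * The payload comes either from the note's register set or from its
 * output function, and is padded or sanity-checked against the size
 * predicted when the note was registered.
 */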
static void
__elfN(putnote)(struct thread *td, struct note_info *ninfo, struct sbuf *sb)
{
	Elf_Note note;
	const struct sysentvec *sv;
	ssize_t old_len, sect_len;
	size_t new_len, descsz, i;

	if (ninfo->type == -1) {
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
		return;
	}

	sv = td->td_proc->p_sysent;

	note.n_namesz = strlen(sv->sv_elf_core_abi_vendor) + 1;
	note.n_descsz = ninfo->outsize;
	note.n_type = ninfo->type;

	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, sv->sv_elf_core_abi_vendor,
	    strlen(sv->sv_elf_core_abi_vendor) + 1);
	sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (note.n_descsz == 0)
		return;
	sbuf_start_section(sb, &old_len);
	if (ninfo->regset != NULL) {
		struct regset *regset = ninfo->regset;
		void *buf;

		buf = malloc(ninfo->outsize, M_TEMP, M_ZERO | M_WAITOK);
		(void)regset->get(regset, ninfo->outarg, buf, &ninfo->outsize);
		sbuf_bcat(sb, buf, ninfo->outsize);
		free(buf, M_TEMP);
	} else
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
	sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (sect_len < 0)
		return;

	new_len = (size_t)sect_len;
	descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
	if (new_len < descsz) {
		/*
		 * It is expected that individual note emitters will correctly
		 * predict their expected output size and fill up to that size
		 * themselves, padding in a format-specific way if needed.
		 * However, in case they don't, just do it here with zeros.
		 */
		for (i = 0; i < descsz - new_len; i++)
			sbuf_putc(sb, 0);
	} else if (new_len > descsz) {
		/*
		 * We can't always truncate sb -- we may have drained some
		 * of it already.
		 */
		KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
		    "read it (%zu > %zu). Since it is longer than "
		    "expected, this coredump's notes are corrupt. THIS "
		    "IS A BUG in the note_procstat routine for type %u.\n",
		    __func__, (unsigned)note.n_type, new_len, descsz,
		    (unsigned)note.n_type));
	}
}

/*
 * Miscellaneous note out functions.
 */

#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_signal.h>

typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
typedef struct thrmisc32 elf_thrmisc_t;
typedef struct ptrace_lwpinfo32 elf_lwpinfo_t;
#define	ELF_KERN_PROC_MASK	KERN_PROC_MASK32
typedef struct kinfo_proc32 elf_kinfo_proc_t;
typedef uint32_t elf_ps_strings_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
typedef thrmisc_t elf_thrmisc_t;
typedef struct ptrace_lwpinfo elf_lwpinfo_t;
#define	ELF_KERN_PROC_MASK	0
typedef struct kinfo_proc elf_kinfo_proc_t;
typedef vm_offset_t elf_ps_strings_t;
#endif

static void
__elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct sbuf sbarg;
	size_t len;
	char *cp, *end;
	struct proc *p;
	elf_prpsinfo_t *psinfo;
	int error;

	p = arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
		psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		PROC_LOCK(p);
		if (p->p_args != NULL) {
			len = sizeof(psinfo->pr_psargs) - 1;
			if (len > p->p_args->ar_length)
				len = p->p_args->ar_length;
			memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
			PROC_UNLOCK(p);
			error = 0;
		} else {
			_PHOLD(p);
			PROC_UNLOCK(p);
			sbuf_new(&sbarg, psinfo->pr_psargs,
			    sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
			error = proc_getargv(curthread, p, &sbarg);
			PRELE(p);
			if (sbuf_finish(&sbarg) == 0) {
				len = sbuf_len(&sbarg);
				if (len > 0)
					len--;
			} else {
				len = sizeof(psinfo->pr_psargs) - 1;
			}
			sbuf_delete(&sbarg);
		}
		if (error != 0 || len == 0 || (ssize_t)len == -1)
			strlcpy(psinfo->pr_psargs, p->p_comm,
			    sizeof(psinfo->pr_psargs));
		else {
			KASSERT(len < sizeof(psinfo->pr_psargs),
			    ("len is too long: %zu vs %zu", len,
			    sizeof(psinfo->pr_psargs)));
			cp = psinfo->pr_psargs;
			end = cp + len - 1;
			for (;;) {
				cp = memchr(cp, '\0', end - cp);
				if (cp == NULL)
					break;
				*cp = ' ';
			}
		}
		psinfo->pr_pid = p->p_pid;
		sbuf_bcat(sb, psinfo, sizeof(*psinfo));
		free(psinfo, M_TEMP);
	}
	*sizep = sizeof(*psinfo);
}

static bool
__elfN(get_prstatus)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_prstatus_t *status;

	if (buf != NULL) {
		KASSERT(*sizep == sizeof(*status), ("%s: invalid size",
		    __func__));
		status = buf;
		memset(status, 0, *sizep);
		status->pr_version = PRSTATUS_VERSION;
		status->pr_statussz = sizeof(elf_prstatus_t);
		status->pr_gregsetsz = sizeof(elf_gregset_t);
		status->pr_fpregsetsz = sizeof(elf_fpregset_t);
		status->pr_osreldate = osreldate;
		status->pr_cursig = td->td_proc->p_sig;
		status->pr_pid = td->td_tid;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_regs32(td, &status->pr_reg);
#else
		fill_regs(td, &status->pr_reg);
#endif
	}
	*sizep = sizeof(*status);
	return (true);
}

static bool
__elfN(set_prstatus)(struct regset *rs, struct thread *td, void *buf,
    size_t size)
{
	elf_prstatus_t *status;

	KASSERT(size == sizeof(*status), ("%s: invalid size", __func__));
	status = buf;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
	set_regs32(td, &status->pr_reg);
#else
	set_regs(td, &status->pr_reg);
#endif
	return (true);
}

static struct regset __elfN(regset_prstatus) = {
	.note = NT_PRSTATUS,
	.size = sizeof(elf_prstatus_t),
	.get = __elfN(get_prstatus),
	.set = __elfN(set_prstatus),
};
ELF_REGSET(__elfN(regset_prstatus));

static bool
__elfN(get_fpregset)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_prfpregset_t *fpregset;

	if (buf != NULL) {
		KASSERT(*sizep == sizeof(*fpregset), ("%s: invalid size",
		    __func__));
		fpregset = buf;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_fpregs32(td, fpregset);
#else
		fill_fpregs(td, fpregset);
#endif
	}
	*sizep = sizeof(*fpregset);
	return (true);
}

static bool
__elfN(set_fpregset)(struct regset *rs, struct thread *td, void *buf,
    size_t size)
{
	elf_prfpregset_t *fpregset;

	fpregset = buf;
	KASSERT(size == sizeof(*fpregset), ("%s: invalid size", __func__));
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
	set_fpregs32(td, fpregset);
#else
	set_fpregs(td, fpregset);
#endif
	return (true);
}

static struct regset __elfN(regset_fpregset) = {
	.note = NT_FPREGSET,
	.size = sizeof(elf_prfpregset_t),
	.get = __elfN(get_fpregset),
	.set = __elfN(set_fpregset),
};
ELF_REGSET(__elfN(regset_fpregset));

static bool
__elfN(get_thrmisc)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_thrmisc_t *thrmisc;

	if (buf != NULL) {
		KASSERT(*sizep == sizeof(*thrmisc),
		    ("%s: invalid size", __func__));
		thrmisc = buf;
		bzero(thrmisc, sizeof(*thrmisc));
		strcpy(thrmisc->pr_tname, td->td_name);
	}
	*sizep = sizeof(*thrmisc);
	return (true);
}

static struct regset __elfN(regset_thrmisc) = {
	.note = NT_THRMISC,
	.size = sizeof(elf_thrmisc_t),
	.get = __elfN(get_thrmisc),
};
ELF_REGSET(__elfN(regset_thrmisc));

static bool
__elfN(get_lwpinfo)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_lwpinfo_t pl;
	size_t size;
	int structsize;

	size = sizeof(structsize) + sizeof(pl);
	if (buf != NULL) {
		KASSERT(*sizep == size, ("%s: invalid size", __func__));
		structsize = sizeof(pl);
		memcpy(buf, &structsize, sizeof(structsize));
		bzero(&pl, sizeof(pl));
		pl.pl_lwpid = td->td_tid;
		pl.pl_event = PL_EVENT_NONE;
		pl.pl_sigmask = td->td_sigmask;
		pl.pl_siglist = td->td_siglist;
		if (td->td_si.si_signo != 0) {
			pl.pl_event = PL_EVENT_SIGNAL;
			pl.pl_flags |= PL_FLAG_SI;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
			siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo);
#else
			pl.pl_siginfo = td->td_si;
#endif
		}
		strcpy(pl.pl_tdname, td->td_name);
		/* XXX TODO: supply more information in struct ptrace_lwpinfo */
		memcpy((int *)buf + 1, &pl, sizeof(pl));
	}
	*sizep = size;
	return (true);
}

static struct regset __elfN(regset_lwpinfo) = {
	.note = NT_PTLWPINFO,
	.size = sizeof(int) + sizeof(elf_lwpinfo_t),
	.get = __elfN(get_lwpinfo),
};
ELF_REGSET(__elfN(regset_lwpinfo));

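/*
 * Register a note for each register set provided by the process's ABI,
 * with NT_PRSTATUS always registered first.  ABIs without a regset
 * table fall back to NT_PRSTATUS plus NT_FPREGSET.
 */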
static size_t
__elfN(prepare_register_notes)(struct thread *td, struct note_info_list *list,
    struct thread *target_td)
{
	struct sysentvec *sv = td->td_proc->p_sysent;
	struct regset **regsetp, **regset_end, *regset;
	size_t size;

	size = 0;

	/* NT_PRSTATUS must be the first register set note. */
	size += __elfN(register_regset_note)(td, list, &__elfN(regset_prstatus),
	    target_td);

	regsetp = sv->sv_regset_begin;
	if (regsetp == NULL) {
		/* XXX: This shouldn't be true for any FreeBSD ABIs. */
		size += __elfN(register_regset_note)(td, list,
		    &__elfN(regset_fpregset), target_td);
		return (size);
	}
	regset_end = sv->sv_regset_end;
	MPASS(regset_end != NULL);
	for (; regsetp < regset_end; regsetp++) {
		regset = *regsetp;
		if (regset->note == NT_PRSTATUS)
			continue;
		size += __elfN(register_regset_note)(td, list, regset,
		    target_td);
	}
	return (size);
}

/*
 * Allow for MD specific notes, as well as any MD
 * specific preparations for writing MI notes.
 */
static void
__elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	void *buf;
	size_t size;

	td = (struct thread *)arg;
	size = *sizep;
	if (size != 0 && sb != NULL)
		buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
	else
		buf = NULL;
	size = 0;
	__elfN(dump_thread)(td, buf, &size);
	KASSERT(sb == NULL || *sizep == size, ("invalid size"));
	if (size != 0 && sb != NULL)
		sbuf_bcat(sb, buf, size);
	free(buf, M_TEMP);
	*sizep = size;
}

#ifdef KINFO_PROC_SIZE
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
#endif

static void
__elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + p->p_numthreads *
	    sizeof(elf_kinfo_proc_t);

	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(elf_kinfo_proc_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sx_slock(&proctree_lock);
		PROC_LOCK(p);
		kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
		sx_sunlock(&proctree_lock);
	}
	*sizep = size;
}

#ifdef KINFO_FILE_SIZE
CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
#endif

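/*
 * Like the other procstat note emitters, this function is called twice:
 * once with sb == NULL to measure the note, using a counting drain, and
 * once to emit it.  The output is zero-padded up to the size computed
 * by the first pass, since the second pass may produce less data than
 * was measured.
 */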
static void
note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size, sect_sz, i;
	ssize_t start_len, sect_len;
	int structsize, filedesc_flags;

	if (coredump_pack_fileinfo)
		filedesc_flags = KERN_FILEDESC_PACK_KINFO;
	else
		filedesc_flags = 0;

	p = arg;
	structsize = sizeof(struct kinfo_file);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_start_section(sb, &start_len);

		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
		    filedesc_flags);

		sect_len = sbuf_end_section(sb, start_len, 0, 0);
		if (sect_len < 0)
			return;
		sect_sz = sect_len;

		KASSERT(sect_sz <= *sizep,
		    ("kern_proc_filedesc_out did not respect maxlen; "
		    "requested %zu, got %zu", *sizep - sizeof(structsize),
		    sect_sz - sizeof(structsize)));

		for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
			sbuf_putc(sb, 0);
	}
}

#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

static void
note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize, vmmap_flags;

	if (coredump_pack_vmmapinfo)
		vmmap_flags = KERN_VMMAP_PACK_KINFO;
	else
		vmmap_flags = 0;

	p = arg;
	structsize = sizeof(struct kinfo_vmentry);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
		    vmmap_flags);
	}
}

static void
note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(gid_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
		    sizeof(gid_t));
	}
	*sizep = size;
}

static void
note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_pd->pd_cmask);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask));
	}
	*sizep = size;
}

static void
note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	struct rlimit rlim[RLIM_NLIMITS];
	size_t size;
	int structsize, i;

	p = arg;
	size = sizeof(structsize) + sizeof(rlim);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(rlim);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		for (i = 0; i < RLIM_NLIMITS; i++)
			lim_rlimit_proc(p, i, &rlim[i]);
		PROC_UNLOCK(p);
		sbuf_bcat(sb, rlim, sizeof(rlim));
	}
	*sizep = size;
}

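/*
 * The osrel and ps_strings notes record a single fixed-size value
 * each, prefixed, like the other procstat notes, with the size of the
 * underlying structure.
 */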
static void
note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + sizeof(p->p_osrel);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_osrel);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	elf_ps_strings_t ps_strings;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + sizeof(ps_strings);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(ps_strings);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		ps_strings = PTROUT(PROC_PS_STRINGS(p));
#else
		ps_strings = PROC_PS_STRINGS(p);
#endif
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, AT_COUNT * sizeof(Elf_Auxinfo),
		    SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		structsize = sizeof(Elf_Auxinfo);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
	}
}

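/*
 * Scan a PT_NOTE program header for a note matching checknote's header
 * and vendor name, invoking the callback on the first match.  The note
 * data is read from the mapped first page of the image when possible,
 * and via vn_rdwr() otherwise.
 */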
static bool
__elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote,
    const char *note_vendor, const Elf_Phdr *pnote,
    bool (*cb)(const Elf_Note *, void *, bool *), void *cb_arg)
{
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	char *buf;
	int i, error;
	bool res;

	/* We need some limit, might as well use PAGE_SIZE. */
	if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
		return (false);
	ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
	if (pnote->p_offset > PAGE_SIZE ||
	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
		buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
		if (buf == NULL) {
			VOP_UNLOCK(imgp->vp);
			buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
		error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
		    pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
		    curthread->td_ucred, NOCRED, NULL, curthread);
		if (error != 0) {
			uprintf("i/o error PT_NOTE\n");
			goto retf;
		}
		note = note0 = (const Elf_Note *)buf;
		note_end = (const Elf_Note *)(buf + pnote->p_filesz);
	} else {
		note = note0 = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset);
		note_end = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset + pnote->p_filesz);
		buf = NULL;
	}
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		if (!aligned(note, Elf32_Addr) || (const char *)note_end -
		    (const char *)note < sizeof(Elf_Note)) {
			goto retf;
		}
		if (note->n_namesz != checknote->n_namesz ||
		    note->n_descsz != checknote->n_descsz ||
		    note->n_type != checknote->n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (note_name + checknote->n_namesz >=
		    (const char *)note_end || strncmp(note_vendor,
		    note_name, checknote->n_namesz) != 0)
			goto nextnote;

		if (cb(note, cb_arg, &res))
			goto ret;
nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
		    roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
	}
retf:
	res = false;
ret:
	free(buf, M_TEMP);
	return (res);
}

struct brandnote_cb_arg {
	Elf_Brandnote *brandnote;
	int32_t *osrel;
};

static bool
brandnote_cb(const Elf_Note *note, void *arg0, bool *res)
{
	struct brandnote_cb_arg *arg;

	arg = arg0;

	/*
	 * Fetch the osreldate for the binary from the ELF OSABI-note if
	 * necessary.
	 */
	*res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
	    arg->brandnote->trans_osrel != NULL ?
	    arg->brandnote->trans_osrel(note, arg->osrel) : true;

	return (true);
}

static Elf_Note fctl_note = {
	.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.n_descsz = sizeof(uint32_t),
	.n_type = NT_FREEBSD_FEATURE_CTL,
};

struct fctl_cb_arg {
	bool *has_fctl0;
	uint32_t *fctl0;
};

static bool
note_fctl_cb(const Elf_Note *note, void *arg0, bool *res)
{
	struct fctl_cb_arg *arg;
	const Elf32_Word *desc;
	uintptr_t p;

	arg = arg0;
	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	desc = (const Elf32_Word *)p;
	*arg->has_fctl0 = true;
	*arg->fctl0 = desc[0];
	*res = true;
	return (true);
}

/*
 * Try to find the appropriate ABI-note section for checknote, and fetch
 * the osreldate and feature control flags for the binary from the ELF
 * OSABI-note.  Only the first page of the image is searched, the same
 * as for headers.
 */
static bool
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
    int32_t *osrel, bool *has_fctl0, uint32_t *fctl0)
{
	const Elf_Phdr *phdr;
	const Elf_Ehdr *hdr;
	struct brandnote_cb_arg b_arg;
	struct fctl_cb_arg f_arg;
	int i, j;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	b_arg.brandnote = brandnote;
	b_arg.osrel = osrel;
	f_arg.has_fctl0 = has_fctl0;
	f_arg.fctl0 = fctl0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp,
		    &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb,
		    &b_arg)) {
			for (j = 0; j < hdr->e_phnum; j++) {
				if (phdr[j].p_type == PT_NOTE &&
				    __elfN(parse_notes)(imgp, &fctl_note,
				    FREEBSD_ABI_VENDOR, &phdr[j],
				    note_fctl_cb, &f_arg))
					break;
			}
			return (true);
		}
	}
	return (false);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
	.ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));

static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
{
	vm_prot_t prot;

	prot = 0;
	if (flags & PF_X)
		prot |= VM_PROT_EXECUTE;
	if (flags & PF_W)
		prot |= VM_PROT_WRITE;
	if (flags & PF_R)
		prot |= VM_PROT_READ;
#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
	if (i386_read_exec && (flags & PF_R))
		prot |= VM_PROT_EXECUTE;
#endif
	return (prot);
}

static Elf_Word
__elfN(untrans_prot)(vm_prot_t prot)
{
	Elf_Word flags;

	flags = 0;
	if (prot & VM_PROT_EXECUTE)
		flags |= PF_X;
	if (prot & VM_PROT_READ)
		flags |= PF_R;
	if (prot & VM_PROT_WRITE)
		flags |= PF_W;
	return (flags);
}