/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017 Dell EMC
 * Copyright (c) 2000-2001, 2003 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/eventhandler.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define ELF_NOTE_ROUNDSIZE	4
#define OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel, uint32_t *fctl0);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry);
static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
    int32_t *osrel);
static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static boolean_t __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel, uint32_t *fctl0);
static vm_prot_t __elfN(trans_prot)(Elf_Word);
static Elf_Word __elfN(untrans_prot)(vm_prot_t);
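
/*
 * The __elfN() and __CONCAT() forms used throughout this file expand
 * according to __ELF_WORD_SIZE; e.g. __elfN(check_header) above becomes
 * elf32_check_header or elf64_check_header depending on which image
 * activator this file is compiled as.
 */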

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

#define CORE_BUF_SIZE	(16 * 1024)

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0,
    "include all and only RW pages in core dumps");

int __elfN(nxstack) =
#if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
    (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
    defined(__riscv)
	1;
#else
	0;
#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");

#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
int i386_read_exec = 0;
SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
    "enable execution from readable segments");
#endif

SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr, CTLFLAG_RW, 0,
    "");
#define ASLR_NODE_OID	__CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)

static int __elfN(aslr_enabled) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
    &__elfN(aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization");

static int __elfN(pie_aslr_enabled) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
    &__elfN(pie_aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization for PIE binaries");

static int __elfN(aslr_honor_sbrk) = 1;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
    &__elfN(aslr_honor_sbrk), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

#define aligned(a, t)	(rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))

static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";

Elf_Brandnote __elfN(freebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
	.hdr.n_descsz	= sizeof(int32_t),
	.hdr.n_type	= NT_FREEBSD_ABI_TAG,
	.vendor		= FREEBSD_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= __elfN(freebsd_trans_osrel)
};

static bool
__elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
{
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	*osrel = *(const int32_t *)(p);

	return (true);
}

static const char GNU_ABI_VENDOR[] = "GNU";
static int GNU_KFREEBSD_ABI_DESC = 3;
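
/*
 * For reference, the GNU .note.ABI-tag record handled below is laid out
 * as follows (a sketch; all fields are 32-bit words):
 *
 *	n_namesz = 4, n_descsz = 16, n_type = 1
 *	name: "GNU\0"
 *	desc: { OS (GNU_KFREEBSD_ABI_DESC for kFreeBSD), major, minor, teeny }
 */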

Elf_Brandnote __elfN(kfreebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
	.hdr.n_descsz	= 16,	/* XXX at least 16 */
	.hdr.n_type	= 1,
	.vendor		= GNU_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= kfreebsd_trans_osrel
};

static bool
kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
{
	const Elf32_Word *desc;
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);

	desc = (const Elf32_Word *)p;
	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
		return (false);

	/*
	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
	 */
	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];

	return (true);
}

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS) {
		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
		    __func__, entry);
		return (-1);
	}
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}
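
/*
 * Illustrative sketch of how a brand is typically registered; actual
 * registrations live in the per-arch elf*_machdep.c files and in ABI
 * emulation modules (the names below are hypothetical):
 *
 *	static Elf64_Brandinfo example_brand_info = {
 *		.brand		= ELFOSABI_FREEBSD,
 *		.machine	= EM_X86_64,
 *		.sysvec		= &elf64_freebsd_sysvec,
 *		...
 *	};
 *	SYSINIT(example_elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
 *	    (sysinit_cfunc_t)elf64_insert_brand_entry, &example_brand_info);
 */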

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel, uint32_t *fctl0)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	Elf_Brandinfo *bi, *bi_m;
	boolean_t ret;
	int i, interp_name_len;

	interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;

	/*
	 * We support four types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, (3) path of the `interp_path'
	 * field, and (4) the ".note.ABI-tag" ELF section.
	 */

	/* Look for an ".note.ABI-tag" ELF section */
	bi_m = NULL;
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL)
			continue;
		if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
			continue;
		if (hdr->e_machine == bi->machine && (bi->flags &
		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
			ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
			    fctl0);
			/* Give brand a chance to veto check_note's guess */
			if (ret && bi->header_supported)
				ret = bi->header_supported(imgp);
			/*
			 * If the note checker claimed the binary, but the
			 * interpreter path in the image does not match the
			 * default one for the brand, try to search for
			 * other brands with the same interpreter.  Either
			 * there is a better brand with the right
			 * interpreter, or, failing that, we return the
			 * first brand which accepted our note and,
			 * optionally, header.
			 */
			if (ret && bi_m == NULL && interp != NULL &&
			    (bi->interp_path == NULL ||
			    (strlen(bi->interp_path) + 1 != interp_name_len ||
			    strncmp(interp, bi->interp_path, interp_name_len)
			    != 0))) {
				bi_m = bi;
				ret = 0;
			}
			if (ret)
				return (bi);
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    (bi->compat_3_brand != NULL &&
		    strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand) == 0))) {
			/* Looks good, but give brand a chance to veto */
			if (bi->header_supported == NULL ||
			    bi->header_supported(imgp)) {
				/*
				 * Again, prefer strictly matching
				 * interpreter path.
				 */
				if (interp_name_len == 0 &&
				    bi->interp_path == NULL)
					return (bi);
				if (bi->interp_path != NULL &&
				    strlen(bi->interp_path) + 1 ==
				    interp_name_len && strncmp(interp,
				    bi->interp_path, interp_name_len) == 0)
					return (bi);
				if (bi_m == NULL)
					bi_m = bi;
			}
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* No known brand, see if the header is recognized by any brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
		    bi->header_supported == NULL)
			continue;
		if (hdr->e_machine == bi->machine) {
			ret = bi->header_supported(imgp);
			if (ret)
				return (bi);
		}
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi == NULL || (bi->flags &
			    (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
			    != 0)
				continue;
			if (hdr->e_machine == bi->machine &&
			    bi->interp_path != NULL &&
			    /* ELF image p_filesz includes terminating zero */
			    strlen(bi->interp_path) + 1 == interp_name_len &&
			    strncmp(interp, bi->interp_path, interp_name_len)
			    == 0 && (bi->header_supported == NULL ||
			    bi->header_supported(imgp)))
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand &&
		    (bi->header_supported == NULL ||
		    bi->header_supported(imgp)))
			return (bi);
	}
	return (NULL);
}
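
/*
 * Example (illustrative): a binary whose EI_OSABI byte matches no
 * registered brand can still be executed through the fallback above,
 * e.g. by setting the tunable
 *
 *	sysctl kern.elf64.fallback_brand=9	# 9 == ELFOSABI_FREEBSD
 *
 * so that the last loop selects the brand whose ->brand equals the
 * fallback value.
 */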

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	int error;
	vm_offset_t off;

	/*
	 * Create the page if it doesn't exist yet.  Ignore errors.
	 */
	vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
	    trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);

	/*
	 * Find the page from the underlying object.
	 */
	if (object != NULL) {
		sf = vm_imgact_map_page(object, offset);
		if (sf == NULL)
			return (KERN_FAILURE);
		off = offset - trunc_page(offset);
		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
		    end - start);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (KERN_FAILURE);
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object,
    vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, locked, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		end = trunc_page(end);
	}
	if (start >= end)
		return (KERN_SUCCESS);
	if ((offset & PAGE_MASK) != 0) {
		/*
		 * The mapping is not page aligned.  This means that we have
		 * to copy the data.
		 */
		rv = vm_map_fixed(map, NULL, 0, start, end - start,
		    prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
		if (rv != KERN_SUCCESS)
			return (rv);
		if (object == NULL)
			return (KERN_SUCCESS);
		for (; start < end; start += sz) {
			sf = vm_imgact_map_page(object, offset);
			if (sf == NULL)
				return (KERN_FAILURE);
			off = offset - trunc_page(offset);
			sz = end - start;
			if (sz > PAGE_SIZE - off)
				sz = PAGE_SIZE - off;
			error = copyout((caddr_t)sf_buf_kva(sf) + off,
			    (caddr_t)start, sz);
			vm_imgact_unmap_page(sf);
			if (error != 0)
				return (KERN_FAILURE);
			offset += sz;
		}
	} else {
		vm_object_reference(object);
		rv = vm_map_fixed(map, object, offset, start, end - start,
		    prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
		    (object != NULL ? MAP_VN_EXEC : 0));
		if (rv != KERN_SUCCESS) {
			locked = VOP_ISLOCKED(imgp->vp);
			VOP_UNLOCK(imgp->vp, 0);
			vm_object_deallocate(object);
			vn_lock(imgp->vp, locked | LK_RETRY);
			return (rv);
		} else if (object != NULL) {
			MPASS(imgp->vp->v_object == object);
			VOP_SET_TEXT_CHECKED(imgp->vp);
		}
	}
	return (KERN_SUCCESS);
}
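
/*
 * Sketch of the address-space layout that load_section() below
 * establishes when p_memsz exceeds p_filesz (i.e. the segment carries
 * bss):
 *
 *	vmaddr                  vmaddr+filsz                vmaddr+memsz
 *	|-- file-backed pages --|-- anonymous, zero-filled (bss) --|
 *
 * The last partially-filled page of file data is not mapped from the
 * file; it is copied into the first page of the anonymous region so
 * that the remainder of that page reads as zeros.
 */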

static int
__elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_map_t map;
	vm_object_t object;
	vm_offset_t off, map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_ooffset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	object = imgp->object;
	map = &imgp->proc->p_vmspace->vm_map;
	map_addr = trunc_page((vm_offset_t)vmaddr);
	file_addr = trunc_page(offset);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (filsz == 0)
		map_len = 0;
	else if (memsz > filsz)
		map_len = trunc_page(offset + filsz) - file_addr;
	else
		map_len = round_page(offset + filsz) - file_addr;

	if (map_len != 0) {
		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(imgp, map, object, file_addr,
		    map_addr, map_addr + map_len, prot, cow);
		if (rv != KERN_SUCCESS)
			return (EINVAL);

		/* we can stop now if we've covered it all */
		if (memsz == filsz)
			return (0);
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
	    filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
		    map_addr + map_len, prot, 0);
		if (rv != KERN_SUCCESS)
			return (EINVAL);
	}

	if (copy_len != 0) {
		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/*
		 * Send the page fragment to user space.  Note that off is
		 * always 0 here: the fragment begins at a page boundary.
		 */
		off = trunc_page(offset + filsz) - trunc_page(offset + filsz);
		error = copyout((caddr_t)sf_buf_kva(sf) + off,
		    (caddr_t)map_addr, copy_len);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (error);
	}

	/*
	 * Remove write access to the page if it was only granted by
	 * map_insert to allow copyout.
	 */
	if ((prot & VM_PROT_WRITE) == 0)
		vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
		    map_len), prot, FALSE);

	return (0);
}

static int
__elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
{
	vm_prot_t prot;
	u_long base_addr;
	bool first;
	int error, i;

	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	base_addr = 0;
	first = true;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

		/* Loadable segment */
		prot = __elfN(trans_prot)(phdr[i].p_flags);
		error = __elfN(load_section)(imgp, phdr[i].p_offset,
		    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
		    phdr[i].p_memsz, phdr[i].p_filesz, prot);
		if (error != 0)
			return (error);

		/*
		 * Establish the base address if this is the first segment.
		 */
		if (first) {
			base_addr = trunc_page(phdr[i].p_vaddr + rbase);
			first = false;
		}
	}

	if (base_addrp != NULL)
		*base_addrp = base_addr;

	return (0);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vattr *attr;
	struct image_params *imgp;
	u_long rbase;
	u_long base_addr = 0;
	int error;

#ifdef CAPABILITY_MODE
	/*
	 * XXXJA: This check can go away once we are sufficiently confident
	 * that the checks in namei() are correct.
	 */
	if (IN_CAPABILITY_MODE(curthread))
		return (ECAPMODE);
#endif

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	NDINIT(nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, file,
	    curthread);
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto fail;

	error = exec_map_first_page(imgp);
	if (error)
		goto fail;

	imgp->object = nd->ni_vp->v_object;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		error = ENOEXEC;
		goto fail;
	}

	error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
	if (error != 0)
		goto fail;

	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);

	if (nd->ni_vp) {
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
		vput(nd->ni_vp);
	}
	free(tempdata, M_TEMP);

	return (error);
}

static u_long
__CONCAT(rnd_, __elfN(base))(vm_map_t map __unused, u_long minv, u_long maxv,
    u_int align)
{
	u_long rbase, res;

	MPASS(vm_map_min(map) <= minv);
	MPASS(maxv <= vm_map_max(map));
	MPASS(minv < maxv);
	MPASS(minv + align < maxv);
	arc4rand(&rbase, sizeof(rbase), 0);
	res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
	res &= ~((u_long)align - 1);
	if (res >= maxv)
		res -= align;
	KASSERT(res >= minv,
	    ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
	    res, minv, maxv, rbase));
	KASSERT(res < maxv,
	    ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
	    res, maxv, minv, rbase));
	return (res);
}
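
/*
 * Worked example (illustrative): with minv = 0x10000, maxv = 0x50000 and
 * align = 0x1000, rnd_base() above returns a page-aligned address in
 * [0x10000, 0x50000); the mask keeps the result aligned and the final
 * adjustment keeps it below maxv.
 */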

static int
__elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long et_dyn_addr)
{
	struct vmspace *vmspace;
	const char *err_str;
	u_long text_size, data_size, total_size, text_addr, data_addr;
	u_long seg_size, seg_addr;
	int i;

	err_str = NULL;
	text_size = data_size = total_size = text_addr = data_addr = 0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

		seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
		seg_size = round_page(phdr[i].p_memsz +
		    phdr[i].p_vaddr + et_dyn_addr - seg_addr);

		/*
		 * Make the largest executable segment the official
		 * text segment and all others data.
		 *
		 * Note that obreak() assumes that data_addr + data_size == end
		 * of data load area, and the ELF file format expects segments
		 * to be sorted by address.  If multiple data segments exist,
		 * the last one will be used.
		 */
		if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
			text_size = seg_size;
			text_addr = seg_addr;
		} else {
			data_size = seg_size;
			data_addr = seg_addr;
		}
		total_size += seg_size;
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments' pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
		err_str = "Data segment size exceeds process limit";
	else if (text_size > maxtsiz)
		err_str = "Text segment size exceeds system limit";
	else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
		err_str = "Total segment size exceeds process limit";
	else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
		err_str = "Data segment size exceeds resource limit";
	else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
		err_str = "Total segment size exceeds resource limit";
	PROC_UNLOCK(imgp->proc);
	if (err_str != NULL) {
		uprintf("%s\n", err_str);
		return (ENOMEM);
	}

	vmspace = imgp->proc->p_vmspace;
	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	return (0);
}

static int
__elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
    char **interpp, bool *free_interpp)
{
	struct thread *td;
	char *interp;
	int error, interp_name_len;

	KASSERT(phdr->p_type == PT_INTERP,
	    ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	td = curthread;

	/* Path to interpreter */
	if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	interp_name_len = phdr->p_filesz;
	if (phdr->p_offset > PAGE_SIZE ||
	    interp_name_len > PAGE_SIZE - phdr->p_offset) {
		interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
		if (interp == NULL) {
			VOP_UNLOCK(imgp->vp, 0);
			interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
		error = vn_rdwr(UIO_READ, imgp->vp, interp,
		    interp_name_len, phdr->p_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
		    NOCRED, NULL, td);
		if (error != 0) {
			free(interp, M_TEMP);
			uprintf("i/o error PT_INTERP %d\n", error);
			return (error);
		}
		interp[interp_name_len] = '\0';

		*interpp = interp;
		*free_interpp = true;
		return (0);
	}

	interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
	if (interp[interp_name_len - 1] != '\0') {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	*interpp = interp;
	*free_interpp = false;
	return (0);
}
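
/*
 * Note on the allocation pattern in get_interp() above (also used for the
 * auxargs allocation in the image activator below): malloc() is first
 * tried with M_NOWAIT so the vnode lock need not be dropped; only if that
 * fails is the lock released around a sleeping M_WAITOK allocation and
 * then reacquired.
 */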

static int
__elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
    const char *interp, u_long *addr, u_long *entry)
{
	char *path;
	int error;

	if (brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s",
		    brand_info->emul_path, interp);
		error = __elfN(load_file)(imgp->proc, path, addr, entry);
		free(path, M_TEMP);
		if (error == 0)
			return (0);
	}

	if (brand_info->interp_newpath != NULL &&
	    (brand_info->interp_path == NULL ||
	    strcmp(interp, brand_info->interp_path) == 0)) {
		error = __elfN(load_file)(imgp->proc,
		    brand_info->interp_newpath, addr, entry);
		if (error == 0)
			return (0);
	}

	error = __elfN(load_file)(imgp->proc, interp, addr, entry);
	if (error == 0)
		return (0);

	uprintf("ELF interpreter %s not found, error %d\n", interp, error);
	return (error);
}

/*
 * Impossible et_dyn_addr initial value indicating that the real base
 * must be calculated later with some randomization applied.
 */
#define ET_DYN_ADDR_RAND	1

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	struct thread *td;
	const Elf_Ehdr *hdr;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	vm_map_t map;
	char *interp;
	Elf_Brandinfo *brand_info;
	struct sysentvec *sv;
	u_long addr, baddr, et_dyn_addr, entry, proghdr;
	u_long maxalign, mapsz, maxv, maxv1;
	uint32_t fctl0;
	int32_t osrel;
	bool free_interp;
	int error, i, n;

	hdr = (const Elf_Ehdr *)imgp->image_header;

	/*
	 * Do we have a valid ELF header?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
	 * if the particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
		/* Only support headers in first page for now */
		uprintf("Program headers not in the first page\n");
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		uprintf("Unaligned program headers\n");
		return (ENOEXEC);
	}

	n = error = 0;
	baddr = 0;
	osrel = 0;
	fctl0 = 0;
	entry = proghdr = 0;
	interp = NULL;
	free_interp = false;
	td = curthread;
	maxalign = PAGE_SIZE;
	mapsz = 0;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:
			if (n == 0)
				baddr = phdr[i].p_vaddr;
			if (phdr[i].p_align > maxalign)
				maxalign = phdr[i].p_align;
			mapsz += phdr[i].p_memsz;
			n++;

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry.  Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
			    <= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
			break;
		case PT_INTERP:
			/* Path to interpreter */
			if (interp != NULL) {
				uprintf("Multiple PT_INTERP headers\n");
				error = ENOEXEC;
				goto ret;
			}
			error = __elfN(get_interp)(imgp, &phdr[i], &interp,
			    &free_interp);
			if (error != 0)
				goto ret;
			break;
		case PT_GNU_STACK:
			if (__elfN(nxstack))
				imgp->stack_prot =
				    __elfN(trans_prot)(phdr[i].p_flags);
			imgp->stack_sz = phdr[i].p_memsz;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto ret;
	}
	sv = brand_info->sysvec;
	et_dyn_addr = 0;
	if (hdr->e_type == ET_DYN) {
		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
			uprintf("Cannot execute shared object\n");
			error = ENOEXEC;
			goto ret;
		}
		/*
		 * Honour the base load address from the dso if it is
		 * non-zero for some reason.
		 */
		if (baddr == 0) {
			if ((sv->sv_flags & SV_ASLR) == 0 ||
			    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
				et_dyn_addr = ET_DYN_LOAD_ADDR;
			else if ((__elfN(pie_aslr_enabled) &&
			    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
			    (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
				et_dyn_addr = ET_DYN_ADDR_RAND;
			else
				et_dyn_addr = ET_DYN_LOAD_ADDR;
		}
	}

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 *
	 * The VV_TEXT flag prevents modifications to the executable while
	 * the vnode is unlocked.
	 */
	VOP_UNLOCK(imgp->vp, 0);

	/*
	 * Decide whether to enable randomization of user mappings.
	 * First, reset user preferences for the setid binaries.
	 * Then account for the support of randomization by the ABI
	 * and by user preferences, and give PIE binaries special
	 * treatment.
	 */
	if (imgp->credential_setid) {
		PROC_LOCK(imgp->proc);
		imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
		PROC_UNLOCK(imgp->proc);
	}
	if ((sv->sv_flags & SV_ASLR) == 0 ||
	    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
	    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
		KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
		    ("et_dyn_addr == RAND and !ASLR"));
	} else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
	    (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
	    et_dyn_addr == ET_DYN_ADDR_RAND) {
		imgp->map_flags |= MAP_ASLR;
		/*
		 * If the user does not care about sbrk, utilize the bss
		 * grow region for mappings as well.  We can select the
		 * base for the image anywhere and still not suffer from
		 * fragmentation.
		 */
		if (!__elfN(aslr_honor_sbrk) ||
		    (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
			imgp->map_flags |= MAP_ASLR_IGNSTART;
	}
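
	/*
	 * The per-process P2_ASLR_* flags consulted above are controlled
	 * through procctl(2) PROC_ASLR_CTL, reachable from the shell via
	 * proccontrol(1), e.g. (illustrative):
	 *	proccontrol -m aslr -s disable ./a.out
	 */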

	error = exec_new_vmspace(imgp, sv);
	vmspace = imgp->proc->p_vmspace;
	map = &vmspace->vm_map;

	imgp->proc->p_sysent = sv;

	maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK);
	if (et_dyn_addr == ET_DYN_ADDR_RAND) {
		KASSERT((map->flags & MAP_ASLR) != 0,
		    ("ET_DYN_ADDR_RAND but !MAP_ASLR"));
		et_dyn_addr = __CONCAT(rnd_, __elfN(base))(map,
		    vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
		    /* reserve half of the address space to the interpreter */
		    maxv / 2, 1UL << flsl(maxalign));
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	if (error != 0)
		goto ret;

	error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL);
	if (error != 0)
		goto ret;

	error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr);
	if (error != 0)
		goto ret;

	entry = (u_long)hdr->e_entry + et_dyn_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td,
	    RLIMIT_DATA));
	if ((map->flags & MAP_ASLR) != 0) {
		maxv1 = maxv / 2 + addr / 2;
		MPASS(maxv1 >= addr);	/* No overflow */
		map->anon_loc = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1,
		    MAXPAGESIZES > 1 ? pagesizes[1] : pagesizes[0]);
	} else {
		map->anon_loc = addr;
	}

	imgp->entry_addr = entry;

	if (interp != NULL) {
		VOP_UNLOCK(imgp->vp, 0);
		if ((map->flags & MAP_ASLR) != 0) {
			/* Assume that interpreter fits into 1/4 of AS */
			maxv1 = maxv / 2 + addr / 2;
			MPASS(maxv1 >= addr);	/* No overflow */
			addr = __CONCAT(rnd_, __elfN(base))(map, addr,
			    maxv1, PAGE_SIZE);
		}
		error = __elfN(load_interp)(imgp, brand_info, interp, &addr,
		    &imgp->entry_addr);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto ret;
	} else
		addr = et_dyn_addr;

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT);
	if (elf_auxargs == NULL) {
		VOP_UNLOCK(imgp->vp, 0);
		elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr + et_dyn_addr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->hdr_eflags = hdr->e_flags;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;
	imgp->reloc_base = addr;
	imgp->proc->p_osrel = osrel;
	imgp->proc->p_fctl0 = fctl0;
	imgp->proc->p_elf_machine = hdr->e_machine;
	imgp->proc->p_elf_flags = hdr->e_flags;

ret:
	if (free_interp)
		free(interp, M_TEMP);
	return (error);
}

#define suword	__CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Auxinfo *argarray, *pos;
	Elf_Addr *base, *auxbase;
	int error;

	base = (Elf_Addr *)*stack_base;
	auxbase = base + imgp->args->argc + 1 + imgp->args->envc + 1;
	argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP,
	    M_WAITOK | M_ZERO);

	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_OSRELDATE,
	    imgp->proc->p_ucred->cr_prison->pr_osreldate);
	if (imgp->canary != 0) {
		AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary);
		AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
	}
	AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
	if (imgp->pagesizes != 0) {
		AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes);
		AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
	}
	if (imgp->sysent->sv_timekeep_base != 0) {
		AUXARGS_ENTRY(pos, AT_TIMEKEEP,
		    imgp->sysent->sv_timekeep_base);
	}
	AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
	    != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
	    imgp->sysent->sv_stackprot);
	if (imgp->sysent->sv_hwcap != NULL)
		AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap);
	if (imgp->sysent->sv_hwcap2 != NULL)
		AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;
	KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs"));

	error = copyout(argarray, auxbase, sizeof(*argarray) * AT_COUNT);
	free(argarray, M_TEMP);
	if (error != 0)
		return (error);

	base--;
	if (suword(base, imgp->args->argc) == -1)
		return (EFAULT);
	*stack_base = (register_t *)base;
	return (0);
}
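
/*
 * Resulting user stack layout consumed by rtld and libc startup (a
 * sketch; contents listed from *stack_base upward):
 *
 *	argc
 *	argv[0] ... argv[argc - 1], NULL
 *	envp[0] ... envp[envc - 1], NULL
 *	auxv: the AT_* entries copied out above, terminated by AT_NULL
 */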

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

typedef void (*outfunc_t)(void *, struct sbuf *, size_t *);

struct note_info {
	int type;		/* Note type. */
	outfunc_t outfunc;	/* Output function. */
	void *outarg;		/* Argument for the output function. */
	size_t outsize;		/* Output size. */
	TAILQ_ENTRY(note_info) link;	/* Link to the next note info. */
};

TAILQ_HEAD(note_info_list, note_info);

/* Coredump output parameters. */
struct coredump_params {
	off_t offset;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
	struct vnode *vp;
	struct compressor *comp;
};

extern int compress_user_cores;
extern int compress_user_cores_level;

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static int core_write(struct coredump_params *, const void *, size_t, off_t,
    enum uio_seg);
static void each_dumpable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
    struct note_info_list *, size_t);
static void __elfN(prepare_notes)(struct thread *, struct note_info_list *,
    size_t *);
static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t);
static void __elfN(putnote)(struct note_info *, struct sbuf *);
static size_t register_note(struct note_info_list *, int, outfunc_t, void *);
static int sbuf_drain_core_output(void *, const char *, int);
static int sbuf_drain_count(void *arg, const char *data, int len);

static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *);
static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *);
static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *);
static void __elfN(note_ptlwpinfo)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
static void note_procstat_files(void *, struct sbuf *, size_t *);
static void note_procstat_groups(void *, struct sbuf *, size_t *);
static void note_procstat_osrel(void *, struct sbuf *, size_t *);
static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
static void note_procstat_umask(void *, struct sbuf *, size_t *);
static void note_procstat_vmmap(void *, struct sbuf *, size_t *);

/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len)
{
	u_int chunk_len;
	int error;

	while (len > 0) {
		chunk_len = MIN(len, CORE_BUF_SIZE);

		/*
		 * We can get EFAULT error here.
		 * In that case zero out the current chunk of the segment.
		 */
		error = copyin(base, buf, chunk_len);
		if (error != 0)
			bzero(buf, chunk_len);
		error = compressor_write(p->comp, buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

static int
core_compressed_write(void *base, size_t len, off_t offset, void *arg)
{

	return (core_write((struct coredump_params *)arg, base, len, offset,
	    UIO_SYSSPACE));
}

static int
core_write(struct coredump_params *p, const void *base, size_t len,
    off_t offset, enum uio_seg seg)
{

	return (vn_rdwr_inchunks(UIO_WRITE, p->vp, __DECONST(void *, base),
	    len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
	    p->active_cred, p->file_cred, NULL, p->td));
}

static int
core_output(void *base, size_t len, off_t offset, struct coredump_params *p,
    void *tmpbuf)
{
	int error;

	if (p->comp != NULL)
		return (compress_chunk(p, base, tmpbuf, len));

	/*
	 * EFAULT is a non-fatal error that we can get, for example,
	 * if the segment is backed by a file but extends beyond its
	 * end.
	 */
	error = core_write(p, base, len, offset, UIO_USERSPACE);
	if (error == EFAULT) {
		log(LOG_WARNING, "Failed to fully fault in a core file segment "
		    "at VA %p with size 0x%zx to be written at offset 0x%jx "
		    "for process %s\n", base, len, offset, curproc->p_comm);

		/*
		 * Write a "real" zero byte at the end of the target region
		 * in the case this is the last segment.
		 * The intermediate space will be implicitly zero-filled.
		 */
		error = core_write(p, zero_region, 1, offset + len - 1,
		    UIO_SYSSPACE);
	}
	return (error);
}

/*
 * Drain into a core file.
 */
static int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *p;
	int error, locked;

	p = (struct coredump_params *)arg;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held.  Draining with the
	 * non-sleepable lock held is unsafe.  The lock is needed for
	 * those routines when dumping a live process.  In our case we
	 * can safely release the lock before draining and acquire
	 * again after.
	 */
	locked = PROC_LOCKED(p->td->td_proc);
	if (locked)
		PROC_UNLOCK(p->td->td_proc);
	if (p->comp != NULL)
		error = compressor_write(p->comp, __DECONST(char *, data), len);
	else
		error = core_write(p, __DECONST(void *, data), len, p->offset,
		    UIO_SYSSPACE);
	if (locked)
		PROC_LOCK(p->td->td_proc);
	if (error != 0)
		return (-error);
	p->offset += len;
	return (len);
}

/*
 * Drain into a counter.
 */
static int
sbuf_drain_count(void *arg, const char *data __unused, int len)
{
	size_t *sizep;

	sizep = (size_t *)arg;
	*sizep += len;
	return (len);
}
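
/*
 * Note on sizing: each note is effectively produced twice.  When
 * register_note() below runs an out-function with a NULL sbuf, the
 * function only reports its would-be size (the procstat variants do this
 * by draining into sbuf_drain_count() above); the real output is emitted
 * later from __elfN(putnote)().
 */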

int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	struct note_info_list notelst;
	struct coredump_params params;
	struct note_info *ninfo;
	void *hdr, *tmpbuf;
	size_t hdrsize, notesz, coresize;

	hdr = NULL;
	tmpbuf = NULL;
	TAILQ_INIT(&notelst);

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_dumpable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Collect info about the core file header area.
	 */
	hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
	if (seginfo.count + 1 >= PN_XNUM)
		hdrsize += sizeof(Elf_Shdr);
	__elfN(prepare_notes)(td, &notelst, &notesz);
	coresize = round_page(hdrsize + notesz) + seginfo.size;

	/* Set up core dump parameters. */
	params.offset = 0;
	params.active_cred = cred;
	params.file_cred = NOCRED;
	params.td = td;
	params.vp = vp;
	params.comp = NULL;

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_add(td->td_proc, RACCT_CORE, coresize);
		PROC_UNLOCK(td->td_proc);
		if (error != 0) {
			error = EFAULT;
			goto done;
		}
	}
#endif
	if (coresize >= limit) {
		error = EFAULT;
		goto done;
	}

	/* Create a compression stream if necessary. */
	if (compress_user_cores != 0) {
		params.comp = compressor_init(core_compressed_write,
		    compress_user_cores, CORE_BUF_SIZE,
		    compress_user_cores_level, &params);
		if (params.comp == NULL) {
			error = EFAULT;
			goto done;
		}
		tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
	}

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out following the notes.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	error = __elfN(corehdr)(&params, seginfo.count, hdr, hdrsize, &notelst,
	    notesz);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = round_page(hdrsize + notesz);
		for (i = 0; i < seginfo.count; i++) {
			error = core_output((caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, &params, tmpbuf);
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
		if (error == 0 && params.comp != NULL)
			error = compressor_flush(params.comp);
	}
	if (error) {
		log(LOG_WARNING,
		    "Failed to write core file for process %s (error %d)\n",
		    curproc->p_comm, error);
	}

done:
	free(tmpbuf, M_TEMP);
	if (params.comp != NULL)
		compressor_fini(params.comp);
	while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
		TAILQ_REMOVE(&notelst, ninfo, link);
		free(ninfo, M_TEMP);
	}
	if (hdr != NULL)
		free(hdr, M_TEMP);

	return (error);
}
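
/*
 * Layout of the core file produced above (sketch):
 *
 *	Elf_Ehdr
 *	Elf_Phdr[1 + seginfo.count]	(PT_NOTE first, then the PT_LOADs)
 *	[Elf_Shdr]			(only when the counts reach PN_XNUM)
 *	notes (notesz bytes)
 *	zero padding up to a page boundary
 *	segment data
 */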

/*
 * A callback for each_dumpable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = __elfN(untrans_prot)(entry->protection);

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_dumpable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(vm_map_entry_t entry, void *closure)
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_dumpable_segment(struct thread *td, segment_callback func, void *closure)
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	boolean_t ignore_entry;

	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include memory segments in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((object = entry->object.vm_object) == NULL)
			continue;

		/* Ignore memory-mapped devices and such things. */
		VM_OBJECT_RLOCK(object);
		while ((backing_object = object->backing_object) != NULL) {
			VM_OBJECT_RLOCK(backing_object);
			VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		ignore_entry = object->type != OBJT_DEFAULT &&
		    object->type != OBJT_SWAP && object->type != OBJT_VNODE &&
		    object->type != OBJT_PHYS;
		VM_OBJECT_RUNLOCK(object);
		if (ignore_entry)
			continue;

		(*func)(entry, closure);
	}
	vm_map_unlock_read(map);
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr,
    size_t hdrsize, struct note_info_list *notelst, size_t notesz)
{
	struct note_info *ninfo;
	struct sbuf *sb;
	int error;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	__elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz);

	sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
	sbuf_set_drain(sb, sbuf_drain_core_output, p);
	sbuf_start_section(sb, NULL);
	sbuf_bcat(sb, hdr, hdrsize);
	TAILQ_FOREACH(ninfo, notelst, link)
		__elfN(putnote)(ninfo, sb);
	/* Align up to a page boundary for the program segments. */
	sbuf_end_section(sb, -1, PAGE_SIZE, 0);
	error = sbuf_finish(sb);
	sbuf_delete(sb);

	return (error);
}

static void
__elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
    size_t *sizep)
{
	struct proc *p;
	struct thread *thr;
	size_t size;

	p = td->td_proc;
	size = 0;

	size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that caused the core dump and is thus likely
	 * the thread one wants selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		size += register_note(list, NT_PRSTATUS,
		    __elfN(note_prstatus), thr);
		size += register_note(list, NT_FPREGSET,
		    __elfN(note_fpregset), thr);
		size += register_note(list, NT_THRMISC,
		    __elfN(note_thrmisc), thr);
		size += register_note(list, NT_PTLWPINFO,
		    __elfN(note_ptlwpinfo), thr);
		size += register_note(list, -1,
		    __elfN(note_threadmd), thr);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	size += register_note(list, NT_PROCSTAT_PROC,
	    __elfN(note_procstat_proc), p);
	size += register_note(list, NT_PROCSTAT_FILES,
	    note_procstat_files, p);
	size += register_note(list, NT_PROCSTAT_VMMAP,
	    note_procstat_vmmap, p);
	size += register_note(list, NT_PROCSTAT_GROUPS,
	    note_procstat_groups, p);
	size += register_note(list, NT_PROCSTAT_UMASK,
	    note_procstat_umask, p);
	size += register_note(list, NT_PROCSTAT_RLIMIT,
	    note_procstat_rlimit, p);
	size += register_note(list, NT_PROCSTAT_OSREL,
	    note_procstat_osrel, p);
	size += register_note(list, NT_PROCSTAT_PSSTRINGS,
	    __elfN(note_procstat_psstrings), p);
	size += register_note(list, NT_PROCSTAT_AUXV,
	    __elfN(note_procstat_auxv), p);

	*sizep = size;
}

static void
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
    size_t notesz)
{
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;

	ehdr = (Elf_Ehdr *)hdr;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = td->td_proc->p_elf_machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = td->td_proc->p_elf_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;

		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;
		KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
		    ("e_shoff: %zu, hdrsize - shdr: %zu",
		    (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = ELF_NOTE_ROUNDSIZE;
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = round_page(hdrsize + notesz);
	each_dumpable_segment(td, cb_put_phdr, &phc);
}
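
/*
 * Each note emitted by the functions below uses the standard ELF note
 * encoding:
 *
 *	Elf_Note header { n_namesz, n_descsz, n_type }
 *	name ("FreeBSD\0"), zero-padded to a 4-byte boundary
 *	desc (n_descsz bytes), zero-padded to a 4-byte boundary
 *
 * (4 == ELF_NOTE_ROUNDSIZE.)
 */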
static void
__elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
    size_t *sizep)
{
	struct proc *p;
	struct thread *thr;
	size_t size;

	p = td->td_proc;
	size = 0;

	size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that caused the core dump and is thus the one
	 * a debugger user most likely wants selected.
	 */
	thr = td;
	while (thr != NULL) {
		size += register_note(list, NT_PRSTATUS,
		    __elfN(note_prstatus), thr);
		size += register_note(list, NT_FPREGSET,
		    __elfN(note_fpregset), thr);
		size += register_note(list, NT_THRMISC,
		    __elfN(note_thrmisc), thr);
		size += register_note(list, NT_PTLWPINFO,
		    __elfN(note_ptlwpinfo), thr);
		size += register_note(list, -1,
		    __elfN(note_threadmd), thr);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	size += register_note(list, NT_PROCSTAT_PROC,
	    __elfN(note_procstat_proc), p);
	size += register_note(list, NT_PROCSTAT_FILES,
	    note_procstat_files, p);
	size += register_note(list, NT_PROCSTAT_VMMAP,
	    note_procstat_vmmap, p);
	size += register_note(list, NT_PROCSTAT_GROUPS,
	    note_procstat_groups, p);
	size += register_note(list, NT_PROCSTAT_UMASK,
	    note_procstat_umask, p);
	size += register_note(list, NT_PROCSTAT_RLIMIT,
	    note_procstat_rlimit, p);
	size += register_note(list, NT_PROCSTAT_OSREL,
	    note_procstat_osrel, p);
	size += register_note(list, NT_PROCSTAT_PSSTRINGS,
	    __elfN(note_procstat_psstrings), p);
	size += register_note(list, NT_PROCSTAT_AUXV,
	    __elfN(note_procstat_auxv), p);

	*sizep = size;
}

static void
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
    size_t notesz)
{
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;

	ehdr = (Elf_Ehdr *)hdr;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = td->td_proc->p_elf_machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = td->td_proc->p_elf_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;

		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;
		KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
		    ("e_shoff: %zu, hdrsize - shdr: %zu",
		    (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = ELF_NOTE_ROUNDSIZE;
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = round_page(hdrsize + notesz);
	each_dumpable_segment(td, cb_put_phdr, &phc);
}

static size_t
register_note(struct note_info_list *list, int type, outfunc_t out, void *arg)
{
	struct note_info *ninfo;
	size_t size, notesize;

	size = 0;
	out(arg, NULL, &size);
	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = type;
	ninfo->outfunc = out;
	ninfo->outarg = arg;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	if (type == -1)
		return (size);

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}
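/*
 * Sketch of the outfunc_t contract used by register_note() above and
 * __elfN(putnote)() below: the emitter is called once with sb == NULL
 * to report its descriptor size, then again with a live sbuf to emit
 * exactly that many bytes.  A hypothetical emitter for a fixed-size
 * payload (struct example_payload is illustrative, not a real type):
 *
 *	static void
 *	note_example(void *arg, struct sbuf *sb, size_t *sizep)
 *	{
 *		struct example_payload *ep = arg;
 *
 *		if (sb != NULL) {
 *			KASSERT(*sizep == sizeof(*ep), ("invalid size"));
 *			sbuf_bcat(sb, ep, sizeof(*ep));
 *		}
 *		*sizep = sizeof(*ep);
 *	}
 */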
static size_t
append_note_data(const void *src, void *dst, size_t len)
{
	size_t padded_len;

	padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
	if (dst != NULL) {
		bcopy(src, dst, len);
		bzero((char *)dst + len, padded_len - len);
	}
	return (padded_len);
}

size_t
__elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
{
	Elf_Note *note;
	char *buf;
	size_t notesize;

	buf = dst;
	if (buf != NULL) {
		note = (Elf_Note *)buf;
		note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
		note->n_descsz = size;
		note->n_type = type;
		buf += sizeof(*note);
		buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
		    sizeof(FREEBSD_ABI_VENDOR));
		append_note_data(src, buf, size);
		if (descp != NULL)
			*descp = buf;
	}

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

static void
__elfN(putnote)(struct note_info *ninfo, struct sbuf *sb)
{
	Elf_Note note;
	ssize_t old_len, sect_len;
	size_t new_len, descsz, i;

	if (ninfo->type == -1) {
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
		return;
	}

	note.n_namesz = sizeof(FREEBSD_ABI_VENDOR);
	note.n_descsz = ninfo->outsize;
	note.n_type = ninfo->type;

	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR));
	sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (note.n_descsz == 0)
		return;
	sbuf_start_section(sb, &old_len);
	ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
	sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (sect_len < 0)
		return;

	new_len = (size_t)sect_len;
	descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
	if (new_len < descsz) {
		/*
		 * It is expected that individual note emitters will correctly
		 * predict their expected output size and fill up to that size
		 * themselves, padding in a format-specific way if needed.
		 * However, in case they don't, just do it here with zeros.
		 */
		for (i = 0; i < descsz - new_len; i++)
			sbuf_putc(sb, 0);
	} else if (new_len > descsz) {
		/*
		 * We can't always truncate sb -- we may have drained some
		 * of it already.
		 */
		KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
		    "read it (%zu > %zu).  Since it is longer than "
		    "expected, this coredump's notes are corrupt.  THIS "
		    "IS A BUG in the note_procstat routine for type %u.\n",
		    __func__, (unsigned)note.n_type, new_len, descsz,
		    (unsigned)note.n_type));
	}
}
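/*
 * Layout sketch of one finished note record as sized by register_note()
 * and emitted by __elfN(putnote)(), with every piece rounded up to
 * ELF_NOTE_ROUNDSIZE (4) bytes:
 *
 *	Elf_Note header    3 * 4 = 12 bytes (n_namesz, n_descsz, n_type)
 *	name               roundup2(sizeof("FreeBSD"), 4) = 8 bytes
 *	descriptor         roundup2(size, 4) bytes
 *
 * For example, a 6-byte descriptor yields 12 + 8 + 8 = 28 bytes in the
 * PT_NOTE segment.
 */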
/*
 * Miscellaneous note out functions.
 */

#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_signal.h>

typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
typedef struct thrmisc32 elf_thrmisc_t;
#define ELF_KERN_PROC_MASK	KERN_PROC_MASK32
typedef struct kinfo_proc32 elf_kinfo_proc_t;
typedef uint32_t elf_ps_strings_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
typedef thrmisc_t elf_thrmisc_t;
#define ELF_KERN_PROC_MASK	0
typedef struct kinfo_proc elf_kinfo_proc_t;
typedef vm_offset_t elf_ps_strings_t;
#endif

static void
__elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct sbuf sbarg;
	size_t len;
	char *cp, *end;
	struct proc *p;
	elf_prpsinfo_t *psinfo;
	int error;

	p = (struct proc *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
		psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		PROC_LOCK(p);
		if (p->p_args != NULL) {
			len = sizeof(psinfo->pr_psargs) - 1;
			if (len > p->p_args->ar_length)
				len = p->p_args->ar_length;
			memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
			PROC_UNLOCK(p);
			error = 0;
		} else {
			_PHOLD(p);
			PROC_UNLOCK(p);
			sbuf_new(&sbarg, psinfo->pr_psargs,
			    sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
			error = proc_getargv(curthread, p, &sbarg);
			PRELE(p);
			if (sbuf_finish(&sbarg) == 0)
				len = sbuf_len(&sbarg) - 1;
			else
				len = sizeof(psinfo->pr_psargs) - 1;
			sbuf_delete(&sbarg);
		}
		if (error || len == 0)
			strlcpy(psinfo->pr_psargs, p->p_comm,
			    sizeof(psinfo->pr_psargs));
		else {
			KASSERT(len < sizeof(psinfo->pr_psargs),
			    ("len is too long: %zu vs %zu", len,
			    sizeof(psinfo->pr_psargs)));
			cp = psinfo->pr_psargs;
			end = cp + len - 1;
			for (;;) {
				cp = memchr(cp, '\0', end - cp);
				if (cp == NULL)
					break;
				*cp = ' ';
			}
		}
		psinfo->pr_pid = p->p_pid;
		sbuf_bcat(sb, psinfo, sizeof(*psinfo));
		free(psinfo, M_TEMP);
	}
	*sizep = sizeof(*psinfo);
}

static void
__elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_prstatus_t *status;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*status), ("invalid size"));
		status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK);
		status->pr_version = PRSTATUS_VERSION;
		status->pr_statussz = sizeof(elf_prstatus_t);
		status->pr_gregsetsz = sizeof(elf_gregset_t);
		status->pr_fpregsetsz = sizeof(elf_fpregset_t);
		status->pr_osreldate = osreldate;
		status->pr_cursig = td->td_proc->p_sig;
		status->pr_pid = td->td_tid;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_regs32(td, &status->pr_reg);
#else
		fill_regs(td, &status->pr_reg);
#endif
		sbuf_bcat(sb, status, sizeof(*status));
		free(status, M_TEMP);
	}
	*sizep = sizeof(*status);
}

static void
__elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_prfpregset_t *fpregset;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*fpregset), ("invalid size"));
		fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_fpregs32(td, fpregset);
#else
		fill_fpregs(td, fpregset);
#endif
		sbuf_bcat(sb, fpregset, sizeof(*fpregset));
		free(fpregset, M_TEMP);
	}
	*sizep = sizeof(*fpregset);
}

static void
__elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_thrmisc_t thrmisc;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(thrmisc), ("invalid size"));
		bzero(&thrmisc._pad, sizeof(thrmisc._pad));
		strcpy(thrmisc.pr_tname, td->td_name);
		sbuf_bcat(sb, &thrmisc, sizeof(thrmisc));
	}
	*sizep = sizeof(thrmisc);
}
static void
__elfN(note_ptlwpinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	size_t size;
	int structsize;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
	struct ptrace_lwpinfo32 pl;
#else
	struct ptrace_lwpinfo pl;
#endif

	td = (struct thread *)arg;
	size = sizeof(structsize) + sizeof(pl);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(pl);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		bzero(&pl, sizeof(pl));
		pl.pl_lwpid = td->td_tid;
		pl.pl_event = PL_EVENT_NONE;
		pl.pl_sigmask = td->td_sigmask;
		pl.pl_siglist = td->td_siglist;
		if (td->td_si.si_signo != 0) {
			pl.pl_event = PL_EVENT_SIGNAL;
			pl.pl_flags |= PL_FLAG_SI;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
			siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo);
#else
			pl.pl_siginfo = td->td_si;
#endif
		}
		strcpy(pl.pl_tdname, td->td_name);
		/* XXX TODO: supply more information in struct ptrace_lwpinfo */
		sbuf_bcat(sb, &pl, sizeof(pl));
	}
	*sizep = size;
}

/*
 * Allow for MD-specific notes, as well as any MD-specific preparations
 * for writing MI notes.
 */
static void
__elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	void *buf;
	size_t size;

	td = (struct thread *)arg;
	size = *sizep;
	if (size != 0 && sb != NULL)
		buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
	else
		buf = NULL;
	size = 0;
	__elfN(dump_thread)(td, buf, &size);
	KASSERT(sb == NULL || *sizep == size, ("invalid size"));
	if (size != 0 && sb != NULL)
		sbuf_bcat(sb, buf, size);
	free(buf, M_TEMP);
	*sizep = size;
}

#ifdef KINFO_PROC_SIZE
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
#endif

static void
__elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + p->p_numthreads *
	    sizeof(elf_kinfo_proc_t);

	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(elf_kinfo_proc_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
	}
	*sizep = size;
}

#ifdef KINFO_FILE_SIZE
CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
#endif

static void
note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size, sect_sz, i;
	ssize_t start_len, sect_len;
	int structsize, filedesc_flags;

	if (coredump_pack_fileinfo)
		filedesc_flags = KERN_FILEDESC_PACK_KINFO;
	else
		filedesc_flags = 0;

	p = (struct proc *)arg;
	structsize = sizeof(struct kinfo_file);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_drain_count, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_start_section(sb, &start_len);

		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
		    filedesc_flags);

		sect_len = sbuf_end_section(sb, start_len, 0, 0);
		if (sect_len < 0)
			return;
		sect_sz = sect_len;

		KASSERT(sect_sz <= *sizep,
		    ("kern_proc_filedesc_out did not respect maxlen; "
		    "requested %zu, got %zu", *sizep - sizeof(structsize),
		    sect_sz - sizeof(structsize)));

		for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
			sbuf_putc(sb, 0);
	}
}
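/*
 * The sb == NULL branches above size variable-length data by running
 * the same emission path into a counting sbuf: a drain callback that
 * only tallies bytes, so nothing is actually buffered or written.  The
 * idiom in isolation (sbuf_drain_count is the stock counting drain
 * already used throughout this file):
 *
 *	size_t size = 0;
 *	struct sbuf *sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
 *
 *	sbuf_set_drain(sb, sbuf_drain_count, &size);
 *	...emit the data exactly as the real pass would...
 *	sbuf_finish(sb);
 *	sbuf_delete(sb);
 *	(size now holds the byte count the real pass will produce)
 */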
#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

static void
note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize, vmmap_flags;

	if (coredump_pack_vmmapinfo)
		vmmap_flags = KERN_VMMAP_PACK_KINFO;
	else
		vmmap_flags = 0;

	p = (struct proc *)arg;
	structsize = sizeof(struct kinfo_vmentry);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_drain_count, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
		    vmmap_flags);
	}
}

static void
note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(gid_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
		    sizeof(gid_t));
	}
	*sizep = size;
}

static void
note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(p->p_fd->fd_cmask);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_fd->fd_cmask);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_fd->fd_cmask, sizeof(p->p_fd->fd_cmask));
	}
	*sizep = size;
}

static void
note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	struct rlimit rlim[RLIM_NLIMITS];
	size_t size;
	int structsize, i;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(rlim);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(rlim);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		for (i = 0; i < RLIM_NLIMITS; i++)
			lim_rlimit_proc(p, i, &rlim[i]);
		PROC_UNLOCK(p);
		sbuf_bcat(sb, rlim, sizeof(rlim));
	}
	*sizep = size;
}

static void
note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(p->p_osrel);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_osrel);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	elf_ps_strings_t ps_strings;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(ps_strings);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(ps_strings);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		ps_strings = PTROUT(p->p_sysent->sv_psstrings);
#else
		ps_strings = p->p_sysent->sv_psstrings;
#endif
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_drain_count, &size);
		/*
		 * Only the length matters in this sizing pass; the
		 * counting drain discards the bytes, so the value of
		 * structsize is irrelevant here.
		 */
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		structsize = sizeof(Elf_Auxinfo);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
	}
}
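/*
 * Each NT_PROCSTAT_* descriptor written above starts with an int
 * holding the element size, followed by the payload, which lets
 * consumers detect ABI changes.  A hypothetical userland reader for
 * the NT_PROCSTAT_OSREL descriptor bytes in "desc":
 *
 *	int structsize, osrel;
 *
 *	memcpy(&structsize, desc, sizeof(structsize));
 *	if (structsize == sizeof(osrel))
 *		memcpy(&osrel, (const char *)desc + sizeof(structsize),
 *		    sizeof(osrel));
 */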
static boolean_t
__elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote,
    const char *note_vendor, const Elf_Phdr *pnote,
    boolean_t (*cb)(const Elf_Note *, void *, boolean_t *), void *cb_arg)
{
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	char *buf;
	int i, error;
	boolean_t res;

	/* We need some limit; PAGE_SIZE is as good as any. */
	if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
		return (FALSE);
	ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
	if (pnote->p_offset > PAGE_SIZE ||
	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
		buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
		if (buf == NULL) {
			VOP_UNLOCK(imgp->vp, 0);
			buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
		error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
		    pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
		    curthread->td_ucred, NOCRED, NULL, curthread);
		if (error != 0) {
			uprintf("i/o error PT_NOTE\n");
			goto retf;
		}
		note = note0 = (const Elf_Note *)buf;
		note_end = (const Elf_Note *)(buf + pnote->p_filesz);
	} else {
		note = note0 = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset);
		note_end = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset + pnote->p_filesz);
		buf = NULL;
	}
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		if (!aligned(note, Elf32_Addr) || (const char *)note_end -
		    (const char *)note < sizeof(Elf_Note)) {
			goto retf;
		}
		if (note->n_namesz != checknote->n_namesz ||
		    note->n_descsz != checknote->n_descsz ||
		    note->n_type != checknote->n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (note_name + checknote->n_namesz >=
		    (const char *)note_end || strncmp(note_vendor,
		    note_name, checknote->n_namesz) != 0)
			goto nextnote;

		if (cb(note, cb_arg, &res))
			goto ret;
nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
		    roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
	}
retf:
	res = FALSE;
ret:
	free(buf, M_TEMP);
	return (res);
}
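/*
 * The records walked above are normally emitted at link time by the C
 * startup code.  A sketch of how such a vendor note can be spelled in
 * C (modeled on the in-tree .note.tag contents; names here are
 * illustrative, not the actual csu declarations):
 *
 *	static const struct {
 *		int32_t	namesz;
 *		int32_t	descsz;
 *		int32_t	type;
 *		char	name[sizeof(NOTE_VENDOR)];
 *		int32_t	desc;
 *	} abitag __attribute__((section(".note.tag"), aligned(4), used)) = {
 *		sizeof(NOTE_VENDOR), sizeof(int32_t), NT_FREEBSD_ABI_TAG,
 *		NOTE_VENDOR, __FreeBSD_version
 *	};
 *
 * with NOTE_VENDOR defined as "FreeBSD", so that parse_notes() matches
 * it against FREEBSD_ABI_VENDOR.
 */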
struct brandnote_cb_arg {
	Elf_Brandnote *brandnote;
	int32_t *osrel;
};

static boolean_t
brandnote_cb(const Elf_Note *note, void *arg0, boolean_t *res)
{
	struct brandnote_cb_arg *arg;

	arg = arg0;

	/*
	 * Fetch the osreldate for the binary from the ELF OSABI-note if
	 * necessary.
	 */
	*res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
	    arg->brandnote->trans_osrel != NULL ?
	    arg->brandnote->trans_osrel(note, arg->osrel) : TRUE;

	return (TRUE);
}

static Elf_Note fctl_note = {
	.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.n_descsz = sizeof(uint32_t),
	.n_type = NT_FREEBSD_FEATURE_CTL,
};

struct fctl_cb_arg {
	uint32_t *fctl0;
};

static boolean_t
note_fctl_cb(const Elf_Note *note, void *arg0, boolean_t *res)
{
	struct fctl_cb_arg *arg;
	const Elf32_Word *desc;
	uintptr_t p;

	arg = arg0;
	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	desc = (const Elf32_Word *)p;
	*arg->fctl0 = desc[0];
	return (TRUE);
}

/*
 * Try to find the appropriate ABI-note section for checknote, and fetch
 * the osreldate and feature control flags for the binary from the ELF
 * OSABI-note.  Only the first page of the image is searched, the same
 * as for headers.
 */
static boolean_t
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
    int32_t *osrel, uint32_t *fctl0)
{
	const Elf_Phdr *phdr;
	const Elf_Ehdr *hdr;
	struct brandnote_cb_arg b_arg;
	struct fctl_cb_arg f_arg;
	int i, j;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	b_arg.brandnote = brandnote;
	b_arg.osrel = osrel;
	f_arg.fctl0 = fctl0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp,
		    &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb,
		    &b_arg)) {
			for (j = 0; j < hdr->e_phnum; j++) {
				if (phdr[j].p_type == PT_NOTE &&
				    __elfN(parse_notes)(imgp, &fctl_note,
				    FREEBSD_ABI_VENDOR, &phdr[j],
				    note_fctl_cb, &f_arg))
					break;
			}
			return (TRUE);
		}
	}
	return (FALSE);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
	.ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));

static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
{
	vm_prot_t prot;

	prot = 0;
	if (flags & PF_X)
		prot |= VM_PROT_EXECUTE;
	if (flags & PF_W)
		prot |= VM_PROT_WRITE;
	if (flags & PF_R)
		prot |= VM_PROT_READ;
#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
	if (i386_read_exec && (flags & PF_R))
		prot |= VM_PROT_EXECUTE;
#endif
	return (prot);
}

static Elf_Word
__elfN(untrans_prot)(vm_prot_t prot)
{
	Elf_Word flags;

	flags = 0;
	if (prot & VM_PROT_EXECUTE)
		flags |= PF_X;
	if (prot & VM_PROT_READ)
		flags |= PF_R;
	if (prot & VM_PROT_WRITE)
		flags |= PF_W;
	return (flags);
}
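/*
 * Worked examples for the two translation helpers above (assuming
 * i386_read_exec is 0, i.e. no read-implies-exec quirk):
 *
 *	__elfN(trans_prot)(PF_R)	== VM_PROT_READ
 *	__elfN(trans_prot)(PF_R | PF_W)	== VM_PROT_READ | VM_PROT_WRITE
 *	__elfN(trans_prot)(PF_R | PF_X)	== VM_PROT_READ | VM_PROT_EXECUTE
 *
 * untrans_prot() inverts the mapping, e.g.
 * __elfN(untrans_prot)(VM_PROT_READ | VM_PROT_WRITE) == (PF_R | PF_W).
 * With the 32-bit read_exec sysctl enabled on amd64/i386, any readable
 * segment is additionally mapped VM_PROT_EXECUTE.
 */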