/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ekcd.h"
#include "opt_kdb.h"
#include "opt_panic.h"
#include "opt_printf.h"
#include "opt_sched.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/compressor.h>
#include <sys/cons.h>
#include <sys/disk.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/dump.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/signalvar.h>

static MALLOC_DEFINE(M_DUMPER, "dumper", "dumper block buffer");

#ifndef PANIC_REBOOT_WAIT_TIME
#define	PANIC_REBOOT_WAIT_TIME	15	/* default to 15 seconds */
#endif
static int panic_reboot_wait_time = PANIC_REBOOT_WAIT_TIME;
SYSCTL_INT(_kern, OID_AUTO, panic_reboot_wait_time, CTLFLAG_RWTUN,
    &panic_reboot_wait_time, 0,
    "Seconds to wait before rebooting after a panic");
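
/*
 * Illustrative tuning (not in the original file): since the OID is
 * CTLFLAG_RWTUN, the wait can be set as a loader tunable or at runtime,
 * e.g.
 *
 *	sysctl kern.panic_reboot_wait_time=30
 *
 * A value of 0 reboots immediately and -1 waits indefinitely for a
 * console keypress; see shutdown_panic() below.
 */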
/*
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compilers.
 */
#include <machine/stdarg.h>

#ifdef KDB
#ifdef KDB_UNATTENDED
static int debugger_on_panic = 0;
#else
static int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic,
    CTLFLAG_RWTUN | CTLFLAG_SECURE,
    &debugger_on_panic, 0, "Run debugger on kernel panic");

int debugger_on_trap = 0;
SYSCTL_INT(_debug, OID_AUTO, debugger_on_trap,
    CTLFLAG_RWTUN | CTLFLAG_SECURE,
    &debugger_on_trap, 0, "Run debugger on kernel trap before panic");

#ifdef KDB_TRACE
static int trace_on_panic = 1;
static bool trace_all_panics = true;
#else
static int trace_on_panic = 0;
static bool trace_all_panics = false;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic,
    CTLFLAG_RWTUN | CTLFLAG_SECURE,
    &trace_on_panic, 0, "Print stack trace on kernel panic");
SYSCTL_BOOL(_debug, OID_AUTO, trace_all_panics, CTLFLAG_RWTUN,
    &trace_all_panics, 0, "Print stack traces on secondary kernel panics");
#endif /* KDB */

static int sync_on_panic = 0;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RWTUN,
    &sync_on_panic, 0, "Do a sync before rebooting from a panic");

static bool poweroff_on_panic = false;
SYSCTL_BOOL(_kern, OID_AUTO, poweroff_on_panic, CTLFLAG_RWTUN,
    &poweroff_on_panic, 0, "Do a power off instead of a reboot on a panic");

static bool powercycle_on_panic = false;
SYSCTL_BOOL(_kern, OID_AUTO, powercycle_on_panic, CTLFLAG_RWTUN,
    &powercycle_on_panic, 0, "Do a power cycle instead of a reboot on a panic");

static SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0,
    "Shutdown environment");

#ifndef DIAGNOSTIC
static int show_busybufs;
#else
static int show_busybufs = 1;
#endif
SYSCTL_INT(_kern_shutdown, OID_AUTO, show_busybufs, CTLFLAG_RW,
    &show_busybufs, 0, "");

int suspend_blocked = 0;
SYSCTL_INT(_kern, OID_AUTO, suspend_blocked, CTLFLAG_RW,
    &suspend_blocked, 0, "Block suspend due to a pending shutdown");

#ifdef EKCD
FEATURE(ekcd, "Encrypted kernel crash dumps support");

MALLOC_DEFINE(M_EKCD, "ekcd", "Encrypted kernel crash dumps data");

struct kerneldumpcrypto {
	uint8_t		kdc_encryption;
	uint8_t		kdc_iv[KERNELDUMP_IV_MAX_SIZE];
	keyInstance	kdc_ki;
	cipherInstance	kdc_ci;
	uint32_t	kdc_dumpkeysize;
	struct kerneldumpkey kdc_dumpkey[];
};
#endif

struct kerneldumpcomp {
	uint8_t		kdc_format;
	struct compressor *kdc_stream;
	uint8_t		*kdc_buf;
	size_t		kdc_resid;
};

static struct kerneldumpcomp *kerneldumpcomp_create(struct dumperinfo *di,
    uint8_t compression);
static void kerneldumpcomp_destroy(struct dumperinfo *di);
static int kerneldumpcomp_write_cb(void *base, size_t len, off_t off, void *arg);

static int kerneldump_gzlevel = 6;
SYSCTL_INT(_kern, OID_AUTO, kerneldump_gzlevel, CTLFLAG_RWTUN,
    &kerneldump_gzlevel, 0,
    "Kernel crash dump compression level");

/*
 * Variable panicstr contains argument to first call to panic; used as flag
 * to indicate that the kernel has already called panic.
 */
const char *panicstr;

int dumping;				/* system is dumping */
int rebooting;				/* system is rebooting */
/*
 * Used to serialize between sysctl kern.shutdown.dumpdevname and list
 * modifications via ioctl.
 */
static struct mtx dumpconf_list_lk;
MTX_SYSINIT(dumper_configs, &dumpconf_list_lk, "dumper config list", MTX_DEF);

/* Our selected dumper(s). */
static TAILQ_HEAD(dumpconflist, dumperinfo) dumper_configs =
    TAILQ_HEAD_INITIALIZER(dumper_configs);

/* Context information for dump-debuggers. */
static struct pcb dumppcb;		/* Registers. */
lwpid_t dumptid;			/* Thread ID. */

static struct cdevsw reroot_cdevsw = {
	.d_version = D_VERSION,
	.d_name    = "reroot",
};

static void poweroff_wait(void *, int);
static void shutdown_halt(void *junk, int howto);
static void shutdown_panic(void *junk, int howto);
static void shutdown_reset(void *junk, int howto);
static int kern_reroot(void);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{

	EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL,
	    SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL,
	    SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL,
	    SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL,
	    SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_SUB_INTRINSIC, SI_ORDER_ANY, shutdown_conf, NULL);

/*
 * The only reason this exists is to create the /dev/reroot/ directory,
 * used by reroot code in init(8) as a mountpoint for tmpfs.
 */
static void
reroot_conf(void *unused)
{
	int error;
	struct cdev *cdev;

	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &cdev,
	    &reroot_cdevsw, NULL, UID_ROOT, GID_WHEEL, 0600, "reroot/reroot");
	if (error != 0) {
		printf("%s: failed to create device node, error %d\n",
		    __func__, error);
	}
}

SYSINIT(reroot_conf, SI_SUB_DEVFS, SI_ORDER_ANY, reroot_conf, NULL);

/*
 * The system call that results in a reboot.
 */
/* ARGSUSED */
int
sys_reboot(struct thread *td, struct reboot_args *uap)
{
	int error;

	error = 0;
#ifdef MAC
	error = mac_system_check_reboot(td->td_ucred, uap->opt);
#endif
	if (error == 0)
		error = priv_check(td, PRIV_REBOOT);
	if (error == 0) {
		if (uap->opt & RB_REROOT)
			error = kern_reroot();
		else
			kern_reboot(uap->opt);
	}
	return (error);
}

static void
shutdown_nice_task_fn(void *arg, int pending __unused)
{
	int howto;

	howto = (uintptr_t)arg;
	/* Send a signal to init(8) and have it shutdown the world. */
	PROC_LOCK(initproc);
	if (howto & RB_POWEROFF)
		kern_psignal(initproc, SIGUSR2);
	else if (howto & RB_POWERCYCLE)
		kern_psignal(initproc, SIGWINCH);
	else if (howto & RB_HALT)
		kern_psignal(initproc, SIGUSR1);
	else
		kern_psignal(initproc, SIGINT);
	PROC_UNLOCK(initproc);
}

static struct task shutdown_nice_task = TASK_INITIALIZER(0,
    &shutdown_nice_task_fn, NULL);
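
/*
 * Note (derived from shutdown_nice_task_fn() above, not in the original
 * file): the request is mapped onto the signals init(8) already
 * understands:
 *
 *	RB_POWEROFF	-> SIGUSR2	(power off)
 *	RB_POWERCYCLE	-> SIGWINCH	(power cycle)
 *	RB_HALT		-> SIGUSR1	(halt)
 *	otherwise	-> SIGINT	(reboot)
 */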
/*
 * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
 */
void
shutdown_nice(int howto)
{

	if (initproc != NULL && !SCHEDULER_STOPPED()) {
		shutdown_nice_task.ta_context = (void *)(uintptr_t)howto;
		taskqueue_enqueue(taskqueue_fast, &shutdown_nice_task);
	} else {
		/*
		 * No init(8) running, or scheduler would not allow it
		 * to run, so simply reboot.
		 */
		kern_reboot(howto | RB_NOSYNC);
	}
}

static void
print_uptime(void)
{
	int f;
	struct timespec ts;

	getnanouptime(&ts);
	printf("Uptime: ");
	f = 0;
	if (ts.tv_sec >= 86400) {
		printf("%ldd", (long)ts.tv_sec / 86400);
		ts.tv_sec %= 86400;
		f = 1;
	}
	if (f || ts.tv_sec >= 3600) {
		printf("%ldh", (long)ts.tv_sec / 3600);
		ts.tv_sec %= 3600;
		f = 1;
	}
	if (f || ts.tv_sec >= 60) {
		printf("%ldm", (long)ts.tv_sec / 60);
		ts.tv_sec %= 60;
		f = 1;
	}
	printf("%lds\n", (long)ts.tv_sec);
}

int
doadump(boolean_t textdump)
{
	boolean_t coredump;
	int error;

	error = 0;
	if (dumping)
		return (EBUSY);
	if (TAILQ_EMPTY(&dumper_configs))
		return (ENXIO);

	savectx(&dumppcb);
	dumptid = curthread->td_tid;
	dumping++;

	coredump = TRUE;
#ifdef DDB
	if (textdump && textdump_pending) {
		coredump = FALSE;
		textdump_dumpsys(TAILQ_FIRST(&dumper_configs));
	}
#endif
	if (coredump) {
		struct dumperinfo *di;

		TAILQ_FOREACH(di, &dumper_configs, di_next) {
			error = dumpsys(di);
			if (error == 0)
				break;
		}
	}

	dumping--;
	return (error);
}

/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int once = 0;

	/*
	 * Normal paths here don't hold Giant, but we can wind up here
	 * unexpectedly with it held.  Drop it now so we don't have to
	 * drop and pick it up elsewhere.  The paths it is locking will
	 * never be returned to, and it is preferable to preclude
	 * deadlock than to lock against code that won't ever
	 * continue.
	 */
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

#if defined(SMP)
	/*
	 * Bind us to the first CPU so that all shutdown code runs there.  Some
	 * systems don't shutdown properly (i.e., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, CPU_FIRST());
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == CPU_FIRST(),
		    ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && once == 0) {
		once = 1;
		bufshutdown(show_busybufs);
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping)
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
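
/*
 * Illustrative note (not in the original file): the reroot path below is
 * what reboot(8) exercises with its -r flag.  init(8) ends up invoking
 * reboot(2) with RB_REROOT, which lands in kern_reroot() rather than in
 * kern_reboot(), so the running system can switch to a new root
 * filesystem without a full reboot.
 */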
/*
 * The system call that results in changing the rootfs.
 */
static int
kern_reroot(void)
{
	struct vnode *oldrootvnode, *vp;
	struct mount *mp, *devmp;
	int error;

	if (curproc != initproc)
		return (EPERM);

	/*
	 * Mark the filesystem containing currently-running executable
	 * (the temporary copy of init(8)) busy.
	 */
	vp = curproc->p_textvp;
	error = vn_lock(vp, LK_SHARED);
	if (error != 0)
		return (error);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		vfs_rel(mp);
		if (error != 0) {
			VOP_UNLOCK(vp, 0);
			return (ENOENT);
		}
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);

	/*
	 * Remove the filesystem containing currently-running executable
	 * from the mount list, to prevent it from being unmounted
	 * by vfs_unmountall(), and to avoid confusing vfs_mountroot().
	 *
	 * Also preserve /dev - forcibly unmounting it could cause driver
	 * reinitialization.
	 */

	vfs_ref(rootdevmp);
	devmp = rootdevmp;
	rootdevmp = NULL;

	mtx_lock(&mountlist_mtx);
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	TAILQ_REMOVE(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);

	oldrootvnode = rootvnode;

	/*
	 * Unmount everything except for the two filesystems preserved above.
	 */
	vfs_unmountall();

	/*
	 * Add /dev back; vfs_mountroot() will move it into its new place.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_HEAD(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	rootdevmp = devmp;
	vfs_rel(rootdevmp);

	/*
	 * Mount the new rootfs.
	 */
	vfs_mountroot();

	/*
	 * Update all references to the old rootvnode.
	 */
	mountcheckdirs(oldrootvnode, rootvnode);

	/*
	 * Add the temporary filesystem back and unbusy it.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_unbusy(mp);

	return (0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		switch (cngetc()) {
		case -1:		/* No console, just die */
			cpu_halt();
			/* NOTREACHED */
		default:
			break;
		}
	}
}
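
/*
 * Summary of panic_reboot_wait_time handling in shutdown_panic() below
 * (derived from the code, not in the original file):
 *
 *	 0	reboot immediately
 *	-1	wait indefinitely for a console keypress
 *	>0	count down, rebooting unless a key is pressed first
 */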
/*
 * Check to see if the system panicked, pause and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
	int loop;

	if (howto & RB_DUMP) {
		if (panic_reboot_wait_time != 0) {
			if (panic_reboot_wait_time != -1) {
				printf("Automatic reboot in %d seconds - "
				    "press a key on the console to abort\n",
				    panic_reboot_wait_time);
				for (loop = panic_reboot_wait_time * 10;
				    loop > 0; --loop) {
					DELAY(1000 * 100); /* 1/10th second */
					/* Did user type a key? */
					if (cncheckc() != -1)
						break;
				}
				if (!loop)
					return;
			}
		} else { /* zero time specified - reboot NOW */
			return;
		}
		printf("--> Press a key on the console to reboot,\n");
		printf("--> or switch off the system now.\n");
		cngetc();
	}
}

/*
 * Everything done, now reset
 */
static void
shutdown_reset(void *junk, int howto)
{

	printf("Rebooting...\n");
	DELAY(1000000);	/* wait 1 sec for printf's to complete and be read */

	/*
	 * Acquiring smp_ipi_mtx here has a double effect:
	 * - it disables interrupts avoiding CPU0 preemption
	 *   by fast handlers (thus deadlocking against other CPUs)
	 * - it avoids deadlocks against smp_rendezvous() or, more
	 *   generally, threads busy-waiting, with this spinlock held,
	 *   and waiting for responses by threads on other CPUs
	 *   (ie. smp_tlb_shootdown()).
	 *
	 * For the !SMP case it just needs to handle the former problem.
	 */
#ifdef SMP
	mtx_lock_spin(&smp_ipi_mtx);
#else
	spinlock_enter();
#endif

	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
	cpu_reset();
	/* NOTREACHED */ /* assuming reset worked */
}

#if defined(WITNESS) || defined(INVARIANT_SUPPORT)
static int kassert_warn_only = 0;
#ifdef KDB
static int kassert_do_kdb = 0;
#endif
#ifdef KTR
static int kassert_do_ktr = 0;
#endif
static int kassert_do_log = 1;
static int kassert_log_pps_limit = 4;
static int kassert_log_mute_at = 0;
static int kassert_log_panic_at = 0;
static int kassert_suppress_in_panic = 0;
static int kassert_warnings = 0;

SYSCTL_NODE(_debug, OID_AUTO, kassert, CTLFLAG_RW, NULL, "kassert options");

#ifdef KASSERT_PANIC_OPTIONAL
#define	KASSERT_RWTUN	CTLFLAG_RWTUN
#else
#define	KASSERT_RWTUN	CTLFLAG_RDTUN
#endif

SYSCTL_INT(_debug_kassert, OID_AUTO, warn_only, KASSERT_RWTUN,
    &kassert_warn_only, 0,
    "KASSERT triggers a panic (0) or just a warning (1)");

#ifdef KDB
SYSCTL_INT(_debug_kassert, OID_AUTO, do_kdb, KASSERT_RWTUN,
    &kassert_do_kdb, 0, "KASSERT will enter the debugger");
#endif

#ifdef KTR
SYSCTL_UINT(_debug_kassert, OID_AUTO, do_ktr, KASSERT_RWTUN,
    &kassert_do_ktr, 0,
    "KASSERT does a KTR, set this to the KTRMASK you want");
#endif

SYSCTL_INT(_debug_kassert, OID_AUTO, do_log, KASSERT_RWTUN,
    &kassert_do_log, 0,
    "If warn_only is enabled, log (1) or do not log (0) assertion violations");

SYSCTL_INT(_debug_kassert, OID_AUTO, warnings, KASSERT_RWTUN,
    &kassert_warnings, 0, "number of KASSERTs that have been triggered");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_panic_at, KASSERT_RWTUN,
    &kassert_log_panic_at, 0, "max number of KASSERTS before we will panic");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_pps_limit, KASSERT_RWTUN,
    &kassert_log_pps_limit, 0, "limit number of log messages per second");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_mute_at, KASSERT_RWTUN,
    &kassert_log_mute_at, 0, "max number of KASSERTS to log");

SYSCTL_INT(_debug_kassert, OID_AUTO, suppress_in_panic, KASSERT_RWTUN,
    &kassert_suppress_in_panic, 0,
    "KASSERTs will be suppressed while handling a panic");
#undef KASSERT_RWTUN

static int kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug_kassert, OID_AUTO, kassert,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE, NULL, 0,
    kassert_sysctl_kassert, "I", "set to trigger a test kassert");

static int
kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	KASSERT(0, ("kassert_sysctl_kassert triggered kassert %d", i));
	return (0);
}
#ifdef KASSERT_PANIC_OPTIONAL
/*
 * Called by KASSERT, this decides if we will panic
 * or if we will log via printf and/or ktr.
 */
void
kassert_panic(const char *fmt, ...)
{
	static char buf[256];
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	/*
	 * If we are suppressing secondary panics, log the warning but do not
	 * re-enter panic/kdb.
	 */
	if (panicstr != NULL && kassert_suppress_in_panic) {
		if (kassert_do_log) {
			printf("KASSERT failed: %s\n", buf);
#ifdef KDB
			if (trace_all_panics && trace_on_panic)
				kdb_backtrace();
#endif
		}
		return;
	}

	/*
	 * panic if we're not just warning, or if we've exceeded
	 * kassert_log_panic_at warnings.
	 */
	if (!kassert_warn_only ||
	    (kassert_log_panic_at > 0 &&
	    kassert_warnings >= kassert_log_panic_at)) {
		va_start(ap, fmt);
		vpanic(fmt, ap);
		/* NORETURN */
	}
#ifdef KTR
	if (kassert_do_ktr)
		CTR0(ktr_mask, buf);
#endif /* KTR */
	/*
	 * log if we've not yet met the mute limit.
	 */
	if (kassert_do_log &&
	    (kassert_log_mute_at == 0 ||
	    kassert_warnings < kassert_log_mute_at)) {
		static struct timeval lasterr;
		static int curerr;

		if (ppsratecheck(&lasterr, &curerr, kassert_log_pps_limit)) {
			printf("KASSERT failed: %s\n", buf);
			kdb_backtrace();
		}
	}
#ifdef KDB
	if (kassert_do_kdb) {
		kdb_enter(KDB_WHY_KASSERT, buf);
	}
#endif
	atomic_add_int(&kassert_warnings, 1);
}
#endif /* KASSERT_PANIC_OPTIONAL */
#endif

/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vpanic(fmt, ap);
}

void
vpanic(const char *fmt, va_list ap)
{
#ifdef SMP
	cpuset_t other_cpus;
#endif
	struct thread *td = curthread;
	int bootopt, newpanic;
	static char buf[256];

	spinlock_enter();

#ifdef SMP
	/*
	 * stop_cpus_hard(other_cpus) should prevent multiple CPUs from
	 * concurrently entering panic.  Only the winner will proceed
	 * further.
	 */
	if (panicstr == NULL && !kdb_active) {
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
		stop_cpus_hard(other_cpus);
	}
#endif

	/*
	 * Ensure that the scheduler is stopped while panicking, even if panic
	 * has been entered from kdb.
	 */
	td->td_stopsched = 1;

	bootopt = RB_AUTOBOOT;
	newpanic = 0;
	if (panicstr)
		bootopt |= RB_NOSYNC;
	else {
		bootopt |= RB_DUMP;
		panicstr = fmt;
		newpanic = 1;
	}

	if (newpanic) {
		(void)vsnprintf(buf, sizeof(buf), fmt, ap);
		panicstr = buf;
		cngrab();
		printf("panic: %s\n", buf);
	} else {
		printf("panic: ");
		vprintf(fmt, ap);
		printf("\n");
	}
#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif
	printf("time = %jd\n", (intmax_t)time_second);
#ifdef KDB
	if ((newpanic || trace_all_panics) && trace_on_panic)
		kdb_backtrace();
	if (debugger_on_panic)
		kdb_enter(KDB_WHY_PANIC, "panic");
#endif
	/*thread_lock(td); */
	td->td_flags |= TDF_INPANIC;
	/* thread_unlock(td); */
	if (!sync_on_panic)
		bootopt |= RB_NOSYNC;
	if (poweroff_on_panic)
		bootopt |= RB_POWEROFF;
	if (powercycle_on_panic)
		bootopt |= RB_POWERCYCLE;
	kern_reboot(bootopt);
}

/*
 * Support for poweroff delay.
 *
 * Please note that setting this delay too short might power off your machine
 * before the write cache on your hard disk has been flushed, leading to
 * soft-updates inconsistencies.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
    &poweroff_delay, 0, "Delay before poweroff to write disk caches (msec)");

static void
poweroff_wait(void *junk, int howto)
{

	if ((howto & (RB_POWEROFF | RB_POWERCYCLE)) == 0 || poweroff_delay <= 0)
		return;
	DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * shutdown_kproc() as a handler for one of shutdown events.
 */
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
    &kproc_shutdown_wait, 0, "Max wait time (sec) to stop for each process");

void
kproc_shutdown(void *arg, int howto)
{
	struct proc *p;
	int error;

	if (panicstr)
		return;

	p = (struct proc *)arg;
	printf("Waiting (max %d seconds) for system process `%s' to stop... ",
	    kproc_shutdown_wait, p->p_comm);
	error = kproc_suspend(p, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		printf("timed out\n");
	else
		printf("done\n");
}

void
kthread_shutdown(void *arg, int howto)
{
	struct thread *td;
	int error;

	if (panicstr)
		return;

	td = (struct thread *)arg;
	printf("Waiting (max %d seconds) for system thread `%s' to stop... ",
	    kproc_shutdown_wait, td->td_name);
	error = kthread_suspend(td, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		printf("timed out\n");
	else
		printf("done\n");
}

static int
dumpdevname_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	char buf[256];
	struct dumperinfo *di;
	struct sbuf sb;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);

	sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);

	mtx_lock(&dumpconf_list_lk);
	TAILQ_FOREACH(di, &dumper_configs, di_next) {
		if (di != TAILQ_FIRST(&dumper_configs))
			sbuf_putc(&sb, ',');
		sbuf_cat(&sb, di->di_devname);
	}
	mtx_unlock(&dumpconf_list_lk);

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}
SYSCTL_PROC(_kern_shutdown, OID_AUTO, dumpdevname, CTLTYPE_STRING | CTLFLAG_RD,
    &dumper_configs, 0, dumpdevname_sysctl_handler, "A",
    "Device(s) for kernel dumps");

static int _dump_append(struct dumperinfo *di, void *virtual,
    vm_offset_t physical, size_t length);

#ifdef EKCD
static struct kerneldumpcrypto *
kerneldumpcrypto_create(size_t blocksize, uint8_t encryption,
    const uint8_t *key, uint32_t encryptedkeysize, const uint8_t *encryptedkey)
{
	struct kerneldumpcrypto *kdc;
	struct kerneldumpkey *kdk;
	uint32_t dumpkeysize;

	dumpkeysize = roundup2(sizeof(*kdk) + encryptedkeysize, blocksize);
	kdc = malloc(sizeof(*kdc) + dumpkeysize, M_EKCD, M_WAITOK | M_ZERO);

	arc4rand(kdc->kdc_iv, sizeof(kdc->kdc_iv), 0);

	kdc->kdc_encryption = encryption;
	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_makeKey(&kdc->kdc_ki, DIR_ENCRYPT, 256, key) <= 0)
			goto failed;
		break;
	default:
		goto failed;
	}

	kdc->kdc_dumpkeysize = dumpkeysize;
	kdk = kdc->kdc_dumpkey;
	kdk->kdk_encryption = kdc->kdc_encryption;
	memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
	kdk->kdk_encryptedkeysize = htod32(encryptedkeysize);
	memcpy(kdk->kdk_encryptedkey, encryptedkey, encryptedkeysize);

	return (kdc);
failed:
	explicit_bzero(kdc, sizeof(*kdc) + dumpkeysize);
	free(kdc, M_EKCD);
	return (NULL);
}
static int
kerneldumpcrypto_init(struct kerneldumpcrypto *kdc)
{
	uint8_t hash[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;
	struct kerneldumpkey *kdk;
	int error;

	error = 0;

	if (kdc == NULL)
		return (0);

	/*
	 * When a user enters ddb it can write a crash dump multiple times.
	 * Each time it should be encrypted using a different IV.
	 */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, kdc->kdc_iv, sizeof(kdc->kdc_iv));
	SHA256_Final(hash, &ctx);
	bcopy(hash, kdc->kdc_iv, sizeof(kdc->kdc_iv));

	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
		    kdc->kdc_iv) <= 0) {
			error = EINVAL;
			goto out;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}

	kdk = kdc->kdc_dumpkey;
	memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
out:
	explicit_bzero(hash, sizeof(hash));
	return (error);
}

static uint32_t
kerneldumpcrypto_dumpkeysize(const struct kerneldumpcrypto *kdc)
{

	if (kdc == NULL)
		return (0);
	return (kdc->kdc_dumpkeysize);
}
#endif /* EKCD */

static struct kerneldumpcomp *
kerneldumpcomp_create(struct dumperinfo *di, uint8_t compression)
{
	struct kerneldumpcomp *kdcomp;
	int format;

	switch (compression) {
	case KERNELDUMP_COMP_GZIP:
		format = COMPRESS_GZIP;
		break;
	case KERNELDUMP_COMP_ZSTD:
		format = COMPRESS_ZSTD;
		break;
	default:
		return (NULL);
	}

	kdcomp = malloc(sizeof(*kdcomp), M_DUMPER, M_WAITOK | M_ZERO);
	kdcomp->kdc_format = compression;
	kdcomp->kdc_stream = compressor_init(kerneldumpcomp_write_cb,
	    format, di->maxiosize, kerneldump_gzlevel, di);
	if (kdcomp->kdc_stream == NULL) {
		free(kdcomp, M_DUMPER);
		return (NULL);
	}
	kdcomp->kdc_buf = malloc(di->maxiosize, M_DUMPER, M_WAITOK | M_NODUMP);
	return (kdcomp);
}
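
/*
 * Note (not in the original file): kern.kerneldump_gzlevel is passed to
 * compressor_init() above for both gzip and zstd streams, so the one
 * sysctl controls the compression level for whichever format is in use.
 */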
static void
kerneldumpcomp_destroy(struct dumperinfo *di)
{
	struct kerneldumpcomp *kdcomp;

	kdcomp = di->kdcomp;
	if (kdcomp == NULL)
		return;
	compressor_fini(kdcomp->kdc_stream);
	explicit_bzero(kdcomp->kdc_buf, di->maxiosize);
	free(kdcomp->kdc_buf, M_DUMPER);
	free(kdcomp, M_DUMPER);
}

/*
 * Must not be present on global list.
 */
static void
free_single_dumper(struct dumperinfo *di)
{

	if (di == NULL)
		return;

	if (di->blockbuf != NULL) {
		explicit_bzero(di->blockbuf, di->blocksize);
		free(di->blockbuf, M_DUMPER);
	}

	kerneldumpcomp_destroy(di);

#ifdef EKCD
	if (di->kdcrypto != NULL) {
		explicit_bzero(di->kdcrypto, sizeof(*di->kdcrypto) +
		    di->kdcrypto->kdc_dumpkeysize);
		free(di->kdcrypto, M_EKCD);
	}
#endif

	explicit_bzero(di, sizeof(*di));
	free(di, M_DUMPER);
}

/* Registration of dumpers */
int
dumper_insert(const struct dumperinfo *di_template, const char *devname,
    const struct diocskerneldump_arg *kda)
{
	struct dumperinfo *newdi, *listdi;
	bool inserted;
	uint8_t index;
	int error;

	index = kda->kda_index;
	MPASS(index != KDA_REMOVE && index != KDA_REMOVE_DEV &&
	    index != KDA_REMOVE_ALL);

	error = priv_check(curthread, PRIV_SETDUMPER);
	if (error != 0)
		return (error);

	newdi = malloc(sizeof(*newdi) + strlen(devname) + 1, M_DUMPER,
	    M_WAITOK | M_ZERO);
	memcpy(newdi, di_template, sizeof(*newdi));
	newdi->blockbuf = NULL;
	newdi->kdcrypto = NULL;
	newdi->kdcomp = NULL;
	strcpy(newdi->di_devname, devname);

	if (kda->kda_encryption != KERNELDUMP_ENC_NONE) {
#ifdef EKCD
		newdi->kdcrypto = kerneldumpcrypto_create(di_template->blocksize,
		    kda->kda_encryption, kda->kda_key,
		    kda->kda_encryptedkeysize, kda->kda_encryptedkey);
		if (newdi->kdcrypto == NULL) {
			error = EINVAL;
			goto cleanup;
		}
#else
		error = EOPNOTSUPP;
		goto cleanup;
#endif
	}
	if (kda->kda_compression != KERNELDUMP_COMP_NONE) {
		/*
		 * We currently can't support simultaneous encryption and
		 * compression because our only encryption mode is an unpadded
		 * block cipher, go figure.  This is low hanging fruit to fix.
		 */
		if (kda->kda_encryption != KERNELDUMP_ENC_NONE) {
			error = EOPNOTSUPP;
			goto cleanup;
		}
		newdi->kdcomp = kerneldumpcomp_create(newdi,
		    kda->kda_compression);
		if (newdi->kdcomp == NULL) {
			error = EINVAL;
			goto cleanup;
		}
	}

	newdi->blockbuf = malloc(newdi->blocksize, M_DUMPER, M_WAITOK | M_ZERO);

	/* Add the new configuration to the queue */
	mtx_lock(&dumpconf_list_lk);
	inserted = false;
	TAILQ_FOREACH(listdi, &dumper_configs, di_next) {
		if (index == 0) {
			TAILQ_INSERT_BEFORE(listdi, newdi, di_next);
			inserted = true;
			break;
		}
		index--;
	}
	if (!inserted)
		TAILQ_INSERT_TAIL(&dumper_configs, newdi, di_next);
	mtx_unlock(&dumpconf_list_lk);

	return (0);

cleanup:
	free_single_dumper(newdi);
	return (error);
}
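
/*
 * Illustrative configuration path (not in the original file): dumpon(8)
 * issues a DIOCSKERNELDUMP ioctl on the chosen device, which ultimately
 * calls dumper_insert() above; kda_index selects the position in the
 * fallback list, with 0 inserting at the head.
 */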
static bool
dumper_config_match(const struct dumperinfo *di, const char *devname,
    const struct diocskerneldump_arg *kda)
{
	if (kda->kda_index == KDA_REMOVE_ALL)
		return (true);

	if (strcmp(di->di_devname, devname) != 0)
		return (false);

	/*
	 * Allow wildcard removal of configs matching a device on g_dev_orphan.
	 */
	if (kda->kda_index == KDA_REMOVE_DEV)
		return (true);

	if (di->kdcomp != NULL) {
		if (di->kdcomp->kdc_format != kda->kda_compression)
			return (false);
	} else if (kda->kda_compression != KERNELDUMP_COMP_NONE)
		return (false);
#ifdef EKCD
	if (di->kdcrypto != NULL) {
		if (di->kdcrypto->kdc_encryption != kda->kda_encryption)
			return (false);
		/*
		 * Do we care to verify keys match to delete?  It seems weird
		 * to expect multiple fallback dump configurations on the same
		 * device that only differ in crypto key.
		 */
	} else
#endif
		if (kda->kda_encryption != KERNELDUMP_ENC_NONE)
			return (false);

	return (true);
}

int
dumper_remove(const char *devname, const struct diocskerneldump_arg *kda)
{
	struct dumperinfo *di, *sdi;
	bool found;
	int error;

	error = priv_check(curthread, PRIV_SETDUMPER);
	if (error != 0)
		return (error);

	/*
	 * Try to find a matching configuration, and kill it.
	 *
	 * A 'kda_index' of KDA_REMOVE_DEV requests removal of any
	 * configuration matching 'devname', which may remove multiple
	 * configurations in atypical setups.
	 */
	found = false;
	mtx_lock(&dumpconf_list_lk);
	TAILQ_FOREACH_SAFE(di, &dumper_configs, di_next, sdi) {
		if (dumper_config_match(di, devname, kda)) {
			found = true;
			TAILQ_REMOVE(&dumper_configs, di, di_next);
			free_single_dumper(di);
		}
	}
	mtx_unlock(&dumpconf_list_lk);

	/* Only produce ENOENT if a more targeted match didn't match. */
	if (!found && kda->kda_index == KDA_REMOVE)
		return (ENOENT);
	return (0);
}

static int
dump_check_bounds(struct dumperinfo *di, off_t offset, size_t length)
{

	if (di->mediasize > 0 && length != 0 && (offset < di->mediaoffset ||
	    offset - di->mediaoffset + length > di->mediasize)) {
		if (di->kdcomp != NULL && offset >= di->mediaoffset) {
			printf(
		    "Compressed dump failed to fit in device boundaries.\n");
			return (E2BIG);
		}

		printf("Attempt to write outside dump device boundaries.\n"
		    "offset(%jd), mediaoffset(%jd), length(%ju), mediasize(%jd).\n",
		    (intmax_t)offset, (intmax_t)di->mediaoffset,
		    (uintmax_t)length, (intmax_t)di->mediasize);
		return (ENOSPC);
	}
	if (length % di->blocksize != 0) {
		printf("Attempt to write partial block of length %ju.\n",
		    (uintmax_t)length);
		return (EINVAL);
	}
	if (offset % di->blocksize != 0) {
		printf("Attempt to write at unaligned offset %jd.\n",
		    (intmax_t)offset);
		return (EINVAL);
	}

	return (0);
}

#ifdef EKCD
static int
dump_encrypt(struct kerneldumpcrypto *kdc, uint8_t *buf, size_t size)
{

	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_blockEncrypt(&kdc->kdc_ci, &kdc->kdc_ki, buf,
		    8 * size, buf) <= 0) {
			return (EIO);
		}
		if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
		    buf + size - 16 /* IV size for AES-256-CBC */) <= 0) {
			return (EIO);
		}
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
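
/*
 * Note (derived from dump_encrypt() above, not in the original file):
 * after each buffer is encrypted, the cipher is reinitialized with the
 * final ciphertext block as the new IV, so successive calls chain
 * together as a single CBC stream.
 */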
/* Encrypt data and call dumper. */
static int
dump_encrypted_write(struct dumperinfo *di, void *virtual,
    vm_offset_t physical, off_t offset, size_t length)
{
	static uint8_t buf[KERNELDUMP_BUFFER_SIZE];
	struct kerneldumpcrypto *kdc;
	int error;
	size_t nbytes;

	kdc = di->kdcrypto;

	while (length > 0) {
		nbytes = MIN(length, sizeof(buf));
		bcopy(virtual, buf, nbytes);

		if (dump_encrypt(kdc, buf, nbytes) != 0)
			return (EIO);

		error = dump_write(di, buf, physical, offset, nbytes);
		if (error != 0)
			return (error);

		offset += nbytes;
		virtual = (void *)((uint8_t *)virtual + nbytes);
		length -= nbytes;
	}

	return (0);
}
#endif /* EKCD */

static int
kerneldumpcomp_write_cb(void *base, size_t length, off_t offset, void *arg)
{
	struct dumperinfo *di;
	size_t resid, rlength;
	int error;

	di = arg;

	if (length % di->blocksize != 0) {
		/*
		 * This must be the final write after flushing the compression
		 * stream.  Write as many full blocks as possible and stash the
		 * residual data in the dumper's block buffer.  It will be
		 * padded and written in dump_finish().
		 */
		rlength = rounddown(length, di->blocksize);
		if (rlength != 0) {
			error = _dump_append(di, base, 0, rlength);
			if (error != 0)
				return (error);
		}
		resid = length - rlength;
		memmove(di->blockbuf, (uint8_t *)base + rlength, resid);
		di->kdcomp->kdc_resid = resid;
		return (EAGAIN);
	}
	return (_dump_append(di, base, 0, length));
}

/*
 * Write kernel dump headers at the beginning and end of the dump extent.
 * Write the kernel dump encryption key after the leading header if we were
 * configured to do so.
 */
static int
dump_write_headers(struct dumperinfo *di, struct kerneldumpheader *kdh)
{
#ifdef EKCD
	struct kerneldumpcrypto *kdc;
#endif
	void *buf, *key;
	size_t hdrsz;
	uint64_t extent;
	uint32_t keysize;
	int error;

	hdrsz = sizeof(*kdh);
	if (hdrsz > di->blocksize)
		return (ENOMEM);

#ifdef EKCD
	kdc = di->kdcrypto;
	key = kdc->kdc_dumpkey;
	keysize = kerneldumpcrypto_dumpkeysize(kdc);
#else
	key = NULL;
	keysize = 0;
#endif

	/*
	 * If the dump device has special handling for headers, let it take
	 * care of writing them out.
	 */
	if (di->dumper_hdr != NULL)
		return (di->dumper_hdr(di, kdh, key, keysize));

	if (hdrsz == di->blocksize)
		buf = kdh;
	else {
		buf = di->blockbuf;
		memset(buf, 0, di->blocksize);
		memcpy(buf, kdh, hdrsz);
	}

	extent = dtoh64(kdh->dumpextent);
#ifdef EKCD
	if (kdc != NULL) {
		error = dump_write(di, kdc->kdc_dumpkey, 0,
		    di->mediaoffset + di->mediasize - di->blocksize - extent -
		    keysize, keysize);
		if (error != 0)
			return (error);
	}
#endif

	error = dump_write(di, buf, 0,
	    di->mediaoffset + di->mediasize - 2 * di->blocksize - extent -
	    keysize, di->blocksize);
	if (error == 0)
		error = dump_write(di, buf, 0, di->mediaoffset + di->mediasize -
		    di->blocksize, di->blocksize);
	return (error);
}
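
/*
 * Note (not in the original file): dump_write_headers() above writes the
 * same header block twice, once just ahead of the dump extent and once
 * in the device's final block, giving savecore(8) identical leading and
 * trailing copies to locate and validate the dump.
 */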
/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device.  This is to
 * protect us from metadata and metadata from us.
 */
#define	SIZEOF_METADATA		(64 * 1024)

/*
 * Do some preliminary setup for a kernel dump: initialize state for
 * encryption, if requested, and make sure that we have enough space on the
 * dump device.
 *
 * We set things up so that the dump ends before the last sector of the dump
 * device, at which the trailing header is written.
 *
 * +-----------+------+-----+----------------------------+------+
 * |           | lhdr | key |    ... kernel dump ...     | thdr |
 * +-----------+------+-----+----------------------------+------+
 *   1 blk      opt   <------- dump extent --------------> 1 blk
 *
 * Dumps written using dump_append() start at the beginning of the extent.
 * Uncompressed dumps will use the entire extent, but compressed dumps
 * typically will not.  The true length of the dump is recorded in the leading
 * and trailing headers once the dump has been completed.
 *
 * The dump device may provide a callback, in which case it will initialize
 * dumpoff and take care of laying out the headers.
 */
int
dump_start(struct dumperinfo *di, struct kerneldumpheader *kdh)
{
	uint64_t dumpextent, span;
	uint32_t keysize;
	int error;

#ifdef EKCD
	error = kerneldumpcrypto_init(di->kdcrypto);
	if (error != 0)
		return (error);
	keysize = kerneldumpcrypto_dumpkeysize(di->kdcrypto);
#else
	error = 0;
	keysize = 0;
#endif

	if (di->dumper_start != NULL) {
		error = di->dumper_start(di);
	} else {
		dumpextent = dtoh64(kdh->dumpextent);
		span = SIZEOF_METADATA + dumpextent + 2 * di->blocksize +
		    keysize;
		if (di->mediasize < span) {
			if (di->kdcomp == NULL)
				return (E2BIG);

			/*
			 * We don't yet know how much space the compressed dump
			 * will occupy, so try to use the whole swap partition
			 * (minus the first 64KB) in the hope that the
			 * compressed dump will fit.  If that doesn't turn out
			 * to be enough, the bounds checking in dump_write()
			 * will catch us and cause the dump to fail.
			 */
			dumpextent = di->mediasize - span + dumpextent;
			kdh->dumpextent = htod64(dumpextent);
		}

		/*
		 * The offset at which to begin writing the dump.
		 */
		di->dumpoff = di->mediaoffset + di->mediasize - di->blocksize -
		    dumpextent;
	}
	di->origdumpoff = di->dumpoff;
	return (error);
}

static int
_dump_append(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    size_t length)
{
	int error;

#ifdef EKCD
	if (di->kdcrypto != NULL)
		error = dump_encrypted_write(di, virtual, physical, di->dumpoff,
		    length);
	else
#endif
		error = dump_write(di, virtual, physical, di->dumpoff, length);
	if (error == 0)
		di->dumpoff += length;
	return (error);
}
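
/*
 * Note (not in the original file): di->origdumpoff is recorded at the
 * end of dump_start() above so that dump_finish() can compute the true
 * dump length as di->dumpoff - di->origdumpoff once all appends are
 * done.
 */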
/*
 * Write to the dump device starting at dumpoff.  When compression is enabled,
 * writes to the device will be performed using a callback that gets invoked
 * when the compression stream's output buffer is full.
 */
int
dump_append(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    size_t length)
{
	void *buf;

	if (di->kdcomp != NULL) {
		/* Bounce through a buffer to avoid CRC errors. */
		if (length > di->maxiosize)
			return (EINVAL);
		buf = di->kdcomp->kdc_buf;
		memmove(buf, virtual, length);
		return (compressor_write(di->kdcomp->kdc_stream, buf, length));
	}
	return (_dump_append(di, virtual, physical, length));
}

/*
 * Write to the dump device at the specified offset.
 */
int
dump_write(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    off_t offset, size_t length)
{
	int error;

	error = dump_check_bounds(di, offset, length);
	if (error != 0)
		return (error);
	return (di->dumper(di->priv, virtual, physical, offset, length));
}

/*
 * Perform kernel dump finalization: flush the compression stream, if
 * necessary, write the leading and trailing kernel dump headers now that we
 * know the true length of the dump, and optionally write the encryption key
 * following the leading header.
 */
int
dump_finish(struct dumperinfo *di, struct kerneldumpheader *kdh)
{
	int error;

	if (di->kdcomp != NULL) {
		error = compressor_flush(di->kdcomp->kdc_stream);
		if (error == EAGAIN) {
			/* We have residual data in di->blockbuf. */
			error = dump_write(di, di->blockbuf, 0, di->dumpoff,
			    di->blocksize);
			di->dumpoff += di->kdcomp->kdc_resid;
			di->kdcomp->kdc_resid = 0;
		}
		if (error != 0)
			return (error);

		/*
		 * We now know the size of the compressed dump, so update the
		 * header accordingly and recompute parity.
		 */
		kdh->dumplength = htod64(di->dumpoff - di->origdumpoff);
		kdh->parity = 0;
		kdh->parity = kerneldump_parity(kdh);

		compressor_reset(di->kdcomp->kdc_stream);
	}

	error = dump_write_headers(di, kdh);
	if (error != 0)
		return (error);

	/* Signal completion to the dump device with a zero-length write. */
	(void)dump_write(di, NULL, 0, 0, 0);
	return (0);
}

void
dump_init_header(const struct dumperinfo *di, struct kerneldumpheader *kdh,
    char *magic, uint32_t archver, uint64_t dumplen)
{
	size_t dstsize;

	bzero(kdh, sizeof(*kdh));
	strlcpy(kdh->magic, magic, sizeof(kdh->magic));
	strlcpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
	kdh->version = htod32(KERNELDUMPVERSION);
	kdh->architectureversion = htod32(archver);
	kdh->dumplength = htod64(dumplen);
	kdh->dumpextent = kdh->dumplength;
	kdh->dumptime = htod64(time_second);
#ifdef EKCD
	kdh->dumpkeysize = htod32(kerneldumpcrypto_dumpkeysize(di->kdcrypto));
#else
	kdh->dumpkeysize = 0;
#endif
	kdh->blocksize = htod32(di->blocksize);
	strlcpy(kdh->hostname, prison0.pr_hostname, sizeof(kdh->hostname));
	dstsize = sizeof(kdh->versionstring);
	if (strlcpy(kdh->versionstring, version, dstsize) >= dstsize)
		kdh->versionstring[dstsize - 2] = '\n';
	if (panicstr != NULL)
		strlcpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
	if (di->kdcomp != NULL)
		kdh->compression = di->kdcomp->kdc_format;
	kdh->parity = kerneldump_parity(kdh);
}

#ifdef DDB
DB_SHOW_COMMAND(panic, db_show_panic)
{

	if (panicstr == NULL)
		db_printf("panicstr not set\n");
	else
		db_printf("panic: %s\n", panicstr);
}
#endif