/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ekcd.h"
#include "opt_kdb.h"
#include "opt_panic.h"
#include "opt_sched.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/compressor.h>
#include <sys/cons.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/dump.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/signalvar.h>

static MALLOC_DEFINE(M_DUMPER, "dumper", "dumper block buffer");

#ifndef PANIC_REBOOT_WAIT_TIME
#define	PANIC_REBOOT_WAIT_TIME 15	/* default to 15 seconds */
#endif
static int panic_reboot_wait_time = PANIC_REBOOT_WAIT_TIME;
SYSCTL_INT(_kern, OID_AUTO, panic_reboot_wait_time, CTLFLAG_RWTUN,
    &panic_reboot_wait_time, 0,
    "Seconds to wait before rebooting after a panic");

/*
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compilers.
 */
#include <machine/stdarg.h>

#ifdef KDB
#ifdef KDB_UNATTENDED
int debugger_on_panic = 0;
#else
int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic,
    CTLFLAG_RWTUN | CTLFLAG_SECURE,
    &debugger_on_panic, 0, "Run debugger on kernel panic");

#ifdef KDB_TRACE
static int trace_on_panic = 1;
#else
static int trace_on_panic = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic,
    CTLFLAG_RWTUN | CTLFLAG_SECURE,
    &trace_on_panic, 0, "Print stack trace on kernel panic");
#endif /* KDB */

static int sync_on_panic = 0;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RWTUN,
    &sync_on_panic, 0, "Do a sync before rebooting from a panic");

static bool poweroff_on_panic = 0;
SYSCTL_BOOL(_kern, OID_AUTO, poweroff_on_panic, CTLFLAG_RWTUN,
    &poweroff_on_panic, 0, "Do a power off instead of a reboot on a panic");

static bool powercycle_on_panic = 0;
SYSCTL_BOOL(_kern, OID_AUTO, powercycle_on_panic, CTLFLAG_RWTUN,
    &powercycle_on_panic, 0, "Do a power cycle instead of a reboot on a panic");

static SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0,
    "Shutdown environment");

#ifndef DIAGNOSTIC
static int show_busybufs;
#else
static int show_busybufs = 1;
#endif
SYSCTL_INT(_kern_shutdown, OID_AUTO, show_busybufs, CTLFLAG_RW,
    &show_busybufs, 0, "");

int suspend_blocked = 0;
SYSCTL_INT(_kern, OID_AUTO, suspend_blocked, CTLFLAG_RW,
    &suspend_blocked, 0, "Block suspend due to a pending shutdown");

#ifdef EKCD
FEATURE(ekcd, "Encrypted kernel crash dumps support");

MALLOC_DEFINE(M_EKCD, "ekcd", "Encrypted kernel crash dumps data");

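/*
 * State for an encrypted kernel dump: the cipher and IV in use, the rijndael
 * key schedule and cipher context used to encrypt the dump data, and the key
 * record (kdc_dumpkey) that is written to the dump device so the dump can be
 * decrypted later.
 */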
struct kerneldumpcrypto {
	uint8_t		kdc_encryption;
	uint8_t		kdc_iv[KERNELDUMP_IV_MAX_SIZE];
	keyInstance	kdc_ki;
	cipherInstance	kdc_ci;
	uint32_t	kdc_dumpkeysize;
	struct kerneldumpkey kdc_dumpkey[];
};
#endif

struct kerneldumpcomp {
	uint8_t		kdc_format;
	struct compressor *kdc_stream;
	uint8_t		*kdc_buf;
	size_t		kdc_resid;
};

static struct kerneldumpcomp *kerneldumpcomp_create(struct dumperinfo *di,
		    uint8_t compression);
static void kerneldumpcomp_destroy(struct dumperinfo *di);
static int kerneldumpcomp_write_cb(void *base, size_t len, off_t off, void *arg);

static int kerneldump_gzlevel = 6;
SYSCTL_INT(_kern, OID_AUTO, kerneldump_gzlevel, CTLFLAG_RWTUN,
    &kerneldump_gzlevel, 0,
    "Kernel crash dump compression level");

/*
 * Variable panicstr contains argument to first call to panic; used as flag
 * to indicate that the kernel has already called panic.
 */
const char *panicstr;

int dumping;				/* system is dumping */
int rebooting;				/* system is rebooting */
static struct dumperinfo dumper;	/* our selected dumper */

/* Context information for dump-debuggers. */
static struct pcb dumppcb;		/* Registers. */
lwpid_t dumptid;			/* Thread ID. */

static struct cdevsw reroot_cdevsw = {
	.d_version = D_VERSION,
	.d_name    = "reroot",
};

static void poweroff_wait(void *, int);
static void shutdown_halt(void *junk, int howto);
static void shutdown_panic(void *junk, int howto);
static void shutdown_reset(void *junk, int howto);
static int kern_reroot(void);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{

	EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL,
	    SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL,
	    SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL,
	    SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL,
	    SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_SUB_INTRINSIC, SI_ORDER_ANY, shutdown_conf, NULL);

/*
 * The only reason this exists is to create the /dev/reroot/ directory,
 * used by reroot code in init(8) as a mountpoint for tmpfs.
 */
static void
reroot_conf(void *unused)
{
	int error;
	struct cdev *cdev;

	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &cdev,
	    &reroot_cdevsw, NULL, UID_ROOT, GID_WHEEL, 0600, "reroot/reroot");
	if (error != 0) {
		printf("%s: failed to create device node, error %d",
		    __func__, error);
	}
}

SYSINIT(reroot_conf, SI_SUB_DEVFS, SI_ORDER_ANY, reroot_conf, NULL);

/*
 * The system call that results in a reboot.
 */
/* ARGSUSED */
int
sys_reboot(struct thread *td, struct reboot_args *uap)
{
	int error;

	error = 0;
#ifdef MAC
	error = mac_system_check_reboot(td->td_ucred, uap->opt);
#endif
	if (error == 0)
		error = priv_check(td, PRIV_REBOOT);
	if (error == 0) {
		if (uap->opt & RB_REROOT)
			error = kern_reroot();
		else
			kern_reboot(uap->opt);
	}
	return (error);
}

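/*
 * Task handler that asks init(8) to bring the system down by sending it the
 * signal corresponding to the requested shutdown action.
 */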
static void
shutdown_nice_task_fn(void *arg, int pending __unused)
{
	int howto;

	howto = (uintptr_t)arg;
	/* Send a signal to init(8) and have it shutdown the world. */
	PROC_LOCK(initproc);
	if (howto & RB_POWEROFF)
		kern_psignal(initproc, SIGUSR2);
	else if (howto & RB_POWERCYCLE)
		kern_psignal(initproc, SIGWINCH);
	else if (howto & RB_HALT)
		kern_psignal(initproc, SIGUSR1);
	else
		kern_psignal(initproc, SIGINT);
	PROC_UNLOCK(initproc);
}

static struct task shutdown_nice_task = TASK_INITIALIZER(0,
    &shutdown_nice_task_fn, NULL);

/*
 * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
 */
void
shutdown_nice(int howto)
{

	if (initproc != NULL && !SCHEDULER_STOPPED()) {
		shutdown_nice_task.ta_context = (void *)(uintptr_t)howto;
		taskqueue_enqueue(taskqueue_fast, &shutdown_nice_task);
	} else {
		/*
		 * No init(8) running, or scheduler would not allow it
		 * to run, so simply reboot.
		 */
		kern_reboot(howto | RB_NOSYNC);
	}
}

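/*
 * Print the time elapsed since boot in a human-readable d/h/m/s form.
 */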
static void
print_uptime(void)
{
	int f;
	struct timespec ts;

	getnanouptime(&ts);
	printf("Uptime: ");
	f = 0;
	if (ts.tv_sec >= 86400) {
		printf("%ldd", (long)ts.tv_sec / 86400);
		ts.tv_sec %= 86400;
		f = 1;
	}
	if (f || ts.tv_sec >= 3600) {
		printf("%ldh", (long)ts.tv_sec / 3600);
		ts.tv_sec %= 3600;
		f = 1;
	}
	if (f || ts.tv_sec >= 60) {
		printf("%ldm", (long)ts.tv_sec / 60);
		ts.tv_sec %= 60;
		f = 1;
	}
	printf("%lds\n", (long)ts.tv_sec);
}

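/*
 * Write a crash dump, or a textdump if one was requested and is pending,
 * using the currently configured dumper.
 */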
int
doadump(boolean_t textdump)
{
	boolean_t coredump;
	int error;

	error = 0;
	if (dumping)
		return (EBUSY);
	if (dumper.dumper == NULL)
		return (ENXIO);

	savectx(&dumppcb);
	dumptid = curthread->td_tid;
	dumping++;

	coredump = TRUE;
#ifdef DDB
	if (textdump && textdump_pending) {
		coredump = FALSE;
		textdump_dumpsys(&dumper);
	}
#endif
	if (coredump)
		error = dumpsys(&dumper);

	dumping--;
	return (error);
}

/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int once = 0;

	/*
	 * Normal paths here don't hold Giant, but we can wind up here
	 * unexpectedly with it held.  Drop it now so we don't have to
	 * drop and pick it up elsewhere.  The paths it is locking will
	 * never be returned to, and it is preferable to preclude
	 * deadlock than to lock against code that won't ever
	 * continue.
	 */
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

#if defined(SMP)
	/*
	 * Bind us to the first CPU so that all shutdown code runs there.  Some
	 * systems don't shutdown properly (i.e., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, CPU_FIRST());
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == CPU_FIRST(),
		    ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && once == 0) {
		once = 1;
		bufshutdown(show_busybufs);
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping)
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}

/*
 * The system call that results in changing the rootfs.
 */
static int
kern_reroot(void)
{
	struct vnode *oldrootvnode, *vp;
	struct mount *mp, *devmp;
	int error;

	if (curproc != initproc)
		return (EPERM);

	/*
	 * Mark the filesystem containing currently-running executable
	 * (the temporary copy of init(8)) busy.
	 */
	vp = curproc->p_textvp;
	error = vn_lock(vp, LK_SHARED);
	if (error != 0)
		return (error);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		vfs_rel(mp);
		if (error != 0) {
			VOP_UNLOCK(vp, 0);
			return (ENOENT);
		}
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);

	/*
	 * Remove the filesystem containing currently-running executable
	 * from the mount list, to prevent it from being unmounted
	 * by vfs_unmountall(), and to avoid confusing vfs_mountroot().
	 *
	 * Also preserve /dev - forcibly unmounting it could cause driver
	 * reinitialization.
	 */

	vfs_ref(rootdevmp);
	devmp = rootdevmp;
	rootdevmp = NULL;

	mtx_lock(&mountlist_mtx);
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	TAILQ_REMOVE(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);

	oldrootvnode = rootvnode;

	/*
	 * Unmount everything except for the two filesystems preserved above.
	 */
	vfs_unmountall();

	/*
	 * Add /dev back; vfs_mountroot() will move it into its new place.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_HEAD(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	rootdevmp = devmp;
	vfs_rel(rootdevmp);

	/*
	 * Mount the new rootfs.
	 */
	vfs_mountroot();

	/*
	 * Update all references to the old rootvnode.
	 */
	mountcheckdirs(oldrootvnode, rootvnode);

	/*
	 * Add the temporary filesystem back and unbusy it.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_unbusy(mp);

	return (0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		switch (cngetc()) {
		case -1:		/* No console, just die */
			cpu_halt();
			/* NOTREACHED */
		default:
			break;
		}
	}
}

/*
 * Check to see if the system panicked, pause and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
	int loop;

	if (howto & RB_DUMP) {
		if (panic_reboot_wait_time != 0) {
			if (panic_reboot_wait_time != -1) {
				printf("Automatic reboot in %d seconds - "
				       "press a key on the console to abort\n",
					panic_reboot_wait_time);
				for (loop = panic_reboot_wait_time * 10;
				     loop > 0; --loop) {
					DELAY(1000 * 100); /* 1/10th second */
					/* Did user type a key? */
					if (cncheckc() != -1)
						break;
				}
				if (!loop)
					return;
			}
		} else {	/* zero time specified - reboot NOW */
			return;
		}
		printf("--> Press a key on the console to reboot,\n");
		printf("--> or switch off the system now.\n");
		cngetc();
	}
}

/*
 * Everything done, now reset
 */
static void
shutdown_reset(void *junk, int howto)
{

	printf("Rebooting...\n");
	DELAY(1000000);	/* wait 1 sec for printf's to complete and be read */

	/*
	 * Acquiring smp_ipi_mtx here has a double effect:
	 * - it disables interrupts avoiding CPU0 preemption
	 *   by fast handlers (thus deadlocking against other CPUs)
	 * - it avoids deadlocks against smp_rendezvous() or, more
	 *   generally, threads busy-waiting, with this spinlock held,
	 *   and waiting for responses by threads on other CPUs
	 *   (ie. smp_tlb_shootdown()).
	 *
	 * For the !SMP case it just needs to handle the former problem.
	 */
#ifdef SMP
	mtx_lock_spin(&smp_ipi_mtx);
#else
	spinlock_enter();
#endif

	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
	cpu_reset();
	/* NOTREACHED */ /* assuming reset worked */
}

#if defined(WITNESS) || defined(INVARIANT_SUPPORT)
static int kassert_warn_only = 0;
#ifdef KDB
static int kassert_do_kdb = 0;
#endif
#ifdef KTR
static int kassert_do_ktr = 0;
#endif
static int kassert_do_log = 1;
static int kassert_log_pps_limit = 4;
static int kassert_log_mute_at = 0;
static int kassert_log_panic_at = 0;
static int kassert_suppress_in_panic = 1;
static int kassert_warnings = 0;

SYSCTL_NODE(_debug, OID_AUTO, kassert, CTLFLAG_RW, NULL, "kassert options");

SYSCTL_INT(_debug_kassert, OID_AUTO, warn_only, CTLFLAG_RWTUN,
    &kassert_warn_only, 0,
    "KASSERT triggers a panic (0) or just a warning (1)");

#ifdef KDB
SYSCTL_INT(_debug_kassert, OID_AUTO, do_kdb, CTLFLAG_RWTUN,
    &kassert_do_kdb, 0, "KASSERT will enter the debugger");
#endif

#ifdef KTR
SYSCTL_UINT(_debug_kassert, OID_AUTO, do_ktr, CTLFLAG_RWTUN,
    &kassert_do_ktr, 0,
    "KASSERT does a KTR, set this to the KTRMASK you want");
#endif

SYSCTL_INT(_debug_kassert, OID_AUTO, do_log, CTLFLAG_RWTUN,
    &kassert_do_log, 0,
    "Log KASSERT failures to the console (1) or not (0)");

SYSCTL_INT(_debug_kassert, OID_AUTO, warnings, CTLFLAG_RWTUN,
    &kassert_warnings, 0, "number of KASSERTs that have been triggered");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_panic_at, CTLFLAG_RWTUN,
    &kassert_log_panic_at, 0, "max number of KASSERTS before we will panic");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_pps_limit, CTLFLAG_RWTUN,
    &kassert_log_pps_limit, 0, "limit number of log messages per second");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_mute_at, CTLFLAG_RWTUN,
    &kassert_log_mute_at, 0, "max number of KASSERTS to log");

SYSCTL_INT(_debug_kassert, OID_AUTO, suppress_in_panic, CTLFLAG_RWTUN,
    &kassert_suppress_in_panic, 0,
    "KASSERTs will be suppressed while handling a panic");

static int kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug_kassert, OID_AUTO, kassert,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE, NULL, 0,
    kassert_sysctl_kassert, "I", "set to trigger a test kassert");

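/*
 * Sysctl handler used to fire a test KASSERT on demand
 * (debug.kassert.kassert).
 */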
static int
kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	KASSERT(0, ("kassert_sysctl_kassert triggered kassert %d", i));
	return (0);
}

/*
 * Called by KASSERT, this decides if we will panic
 * or if we will log via printf and/or ktr.
 */
void
kassert_panic(const char *fmt, ...)
{
	static char buf[256];
	va_list ap;

	/* If we already panic'd, don't create a double-fault. */
	if (panicstr != NULL && kassert_suppress_in_panic)
		return;

	va_start(ap, fmt);
	(void)vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	/*
	 * panic if we're not just warning, or if we've exceeded
	 * kassert_log_panic_at warnings.
	 */
	if (!kassert_warn_only ||
	    (kassert_log_panic_at > 0 &&
	    kassert_warnings >= kassert_log_panic_at)) {
		va_start(ap, fmt);
		vpanic(fmt, ap);
		/* NORETURN */
	}
#ifdef KTR
	if (kassert_do_ktr)
		CTR0(ktr_mask, buf);
#endif /* KTR */
	/*
	 * log if we've not yet met the mute limit.
	 */
	if (kassert_do_log &&
	    (kassert_log_mute_at == 0 ||
	    kassert_warnings < kassert_log_mute_at)) {
		static struct timeval lasterr;
		static int curerr;

		if (ppsratecheck(&lasterr, &curerr, kassert_log_pps_limit)) {
			printf("KASSERT failed: %s\n", buf);
			kdb_backtrace();
		}
	}
#ifdef KDB
	if (kassert_do_kdb) {
		kdb_enter(KDB_WHY_KASSERT, buf);
	}
#endif
	atomic_add_int(&kassert_warnings, 1);
}
#endif

/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vpanic(fmt, ap);
}

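/*
 * The va_list variant of panic(); both panic() and kassert_panic() end up
 * here.
 */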
void
vpanic(const char *fmt, va_list ap)
{
#ifdef SMP
	cpuset_t other_cpus;
#endif
	struct thread *td = curthread;
	int bootopt, newpanic;
	static char buf[256];

	spinlock_enter();

#ifdef SMP
	/*
	 * stop_cpus_hard(other_cpus) should prevent multiple CPUs from
	 * concurrently entering panic.  Only the winner will proceed
	 * further.
	 */
	if (panicstr == NULL && !kdb_active) {
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
		stop_cpus_hard(other_cpus);
	}
#endif

	/*
	 * Ensure that the scheduler is stopped while panicking, even if panic
	 * has been entered from kdb.
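	 * Setting td_stopsched here is what makes SCHEDULER_STOPPED() true.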
	 */
	td->td_stopsched = 1;

	bootopt = RB_AUTOBOOT;
	newpanic = 0;
	if (panicstr)
		bootopt |= RB_NOSYNC;
	else {
		bootopt |= RB_DUMP;
		panicstr = fmt;
		newpanic = 1;
	}

	if (newpanic) {
		(void)vsnprintf(buf, sizeof(buf), fmt, ap);
		panicstr = buf;
		cngrab();
		printf("panic: %s\n", buf);
	} else {
		printf("panic: ");
		vprintf(fmt, ap);
		printf("\n");
	}
#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif
	printf("time = %jd\n", (intmax_t)time_second);
#ifdef KDB
	if (newpanic && trace_on_panic)
		kdb_backtrace();
	if (debugger_on_panic)
		kdb_enter(KDB_WHY_PANIC, "panic");
#endif
	/*thread_lock(td); */
	td->td_flags |= TDF_INPANIC;
	/* thread_unlock(td); */
	if (!sync_on_panic)
		bootopt |= RB_NOSYNC;
	if (poweroff_on_panic)
		bootopt |= RB_POWEROFF;
	if (powercycle_on_panic)
		bootopt |= RB_POWERCYCLE;
	kern_reboot(bootopt);
}

/*
 * Support for poweroff delay.
 *
 * Please note that setting this delay too short might power off your machine
 * before the write cache on your hard disk has been flushed, leading to
 * soft-updates inconsistencies.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
    &poweroff_delay, 0, "Delay before poweroff to write disk caches (msec)");

static void
poweroff_wait(void *junk, int howto)
{

	if ((howto & (RB_POWEROFF | RB_POWERCYCLE)) == 0 || poweroff_delay <= 0)
		return;
	DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * shutdown_kproc() as a handler for one of shutdown events.
 */
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
    &kproc_shutdown_wait, 0, "Max wait time (sec) to stop for each process");

void
kproc_shutdown(void *arg, int howto)
{
	struct proc *p;
	int error;

	if (panicstr)
		return;

	p = (struct proc *)arg;
	printf("Waiting (max %d seconds) for system process `%s' to stop... ",
	    kproc_shutdown_wait, p->p_comm);
	error = kproc_suspend(p, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		printf("timed out\n");
	else
		printf("done\n");
}

void
kthread_shutdown(void *arg, int howto)
{
	struct thread *td;
	int error;

	if (panicstr)
		return;

	td = (struct thread *)arg;
	printf("Waiting (max %d seconds) for system thread `%s' to stop... ",
	    kproc_shutdown_wait, td->td_name);
	error = kthread_suspend(td, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		printf("timed out\n");
	else
		printf("done\n");
}

static char dumpdevname[sizeof(((struct cdev*)NULL)->si_name)];
SYSCTL_STRING(_kern_shutdown, OID_AUTO, dumpdevname, CTLFLAG_RD,
    dumpdevname, 0, "Device for kernel dumps");

static int	_dump_append(struct dumperinfo *di, void *virtual,
		    vm_offset_t physical, size_t length);

#ifdef EKCD
static struct kerneldumpcrypto *
kerneldumpcrypto_create(size_t blocksize, uint8_t encryption,
    const uint8_t *key, uint32_t encryptedkeysize, const uint8_t *encryptedkey)
{
	struct kerneldumpcrypto *kdc;
	struct kerneldumpkey *kdk;
	uint32_t dumpkeysize;

	dumpkeysize = roundup2(sizeof(*kdk) + encryptedkeysize, blocksize);
	kdc = malloc(sizeof(*kdc) + dumpkeysize, M_EKCD, M_WAITOK | M_ZERO);

	arc4rand(kdc->kdc_iv, sizeof(kdc->kdc_iv), 0);

	kdc->kdc_encryption = encryption;
	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_makeKey(&kdc->kdc_ki, DIR_ENCRYPT, 256, key) <= 0)
			goto failed;
		break;
	default:
		goto failed;
	}

	kdc->kdc_dumpkeysize = dumpkeysize;
	kdk = kdc->kdc_dumpkey;
	kdk->kdk_encryption = kdc->kdc_encryption;
	memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
	kdk->kdk_encryptedkeysize = htod32(encryptedkeysize);
	memcpy(kdk->kdk_encryptedkey, encryptedkey, encryptedkeysize);

	return (kdc);
failed:
	explicit_bzero(kdc, sizeof(*kdc) + dumpkeysize);
	free(kdc, M_EKCD);
	return (NULL);
}

static int
kerneldumpcrypto_init(struct kerneldumpcrypto *kdc)
{
	uint8_t hash[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;
	struct kerneldumpkey *kdk;
	int error;

	error = 0;

	if (kdc == NULL)
		return (0);

	/*
	 * When a user enters ddb it can write a crash dump multiple times.
	 * Each time it should be encrypted using a different IV.
	 */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, kdc->kdc_iv, sizeof(kdc->kdc_iv));
	SHA256_Final(hash, &ctx);
	bcopy(hash, kdc->kdc_iv, sizeof(kdc->kdc_iv));

	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
		    kdc->kdc_iv) <= 0) {
			error = EINVAL;
			goto out;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}

	kdk = kdc->kdc_dumpkey;
	memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
out:
	explicit_bzero(hash, sizeof(hash));
	return (error);
}

static uint32_t
kerneldumpcrypto_dumpkeysize(const struct kerneldumpcrypto *kdc)
{

	if (kdc == NULL)
		return (0);
	return (kdc->kdc_dumpkeysize);
}
#endif /* EKCD */

static struct kerneldumpcomp *
kerneldumpcomp_create(struct dumperinfo *di, uint8_t compression)
{
	struct kerneldumpcomp *kdcomp;
	int format;

	switch (compression) {
	case KERNELDUMP_COMP_GZIP:
		format = COMPRESS_GZIP;
		break;
	case KERNELDUMP_COMP_ZSTD:
		format = COMPRESS_ZSTD;
		break;
	default:
		return (NULL);
	}

	kdcomp = malloc(sizeof(*kdcomp), M_DUMPER, M_WAITOK | M_ZERO);
	kdcomp->kdc_format = compression;
	kdcomp->kdc_stream = compressor_init(kerneldumpcomp_write_cb,
	    format, di->maxiosize, kerneldump_gzlevel, di);
	if (kdcomp->kdc_stream == NULL) {
		free(kdcomp, M_DUMPER);
		return (NULL);
	}
	kdcomp->kdc_buf = malloc(di->maxiosize, M_DUMPER, M_WAITOK | M_NODUMP);
	return (kdcomp);
}

static void
kerneldumpcomp_destroy(struct dumperinfo *di)
{
	struct kerneldumpcomp *kdcomp;

	kdcomp = di->kdcomp;
	if (kdcomp == NULL)
		return;
	compressor_fini(kdcomp->kdc_stream);
	explicit_bzero(kdcomp->kdc_buf, di->maxiosize);
	free(kdcomp->kdc_buf, M_DUMPER);
	free(kdcomp, M_DUMPER);
}

/* Registration of dumpers */
int
set_dumper(struct dumperinfo *di, const char *devname, struct thread *td,
    uint8_t compression, uint8_t encryption, const uint8_t *key,
    uint32_t encryptedkeysize, const uint8_t *encryptedkey)
{
	size_t wantcopy;
	int error;

	error = priv_check(td, PRIV_SETDUMPER);
	if (error != 0)
		return (error);

	if (di == NULL) {
		error = 0;
		goto cleanup;
	}
	if (dumper.dumper != NULL)
		return (EBUSY);
	dumper = *di;
	dumper.blockbuf = NULL;
	dumper.kdcrypto = NULL;
	dumper.kdcomp = NULL;

	if (encryption != KERNELDUMP_ENC_NONE) {
#ifdef EKCD
		dumper.kdcrypto = kerneldumpcrypto_create(di->blocksize,
		    encryption, key, encryptedkeysize, encryptedkey);
		if (dumper.kdcrypto == NULL) {
			error = EINVAL;
			goto cleanup;
		}
#else
		error = EOPNOTSUPP;
		goto cleanup;
#endif
	}

	wantcopy = strlcpy(dumpdevname, devname, sizeof(dumpdevname));
	if (wantcopy >= sizeof(dumpdevname)) {
		printf("set_dumper: device name truncated from '%s' -> '%s'\n",
		    devname, dumpdevname);
	}

	if (compression != KERNELDUMP_COMP_NONE) {
		/*
		 * We currently can't support simultaneous encryption and
		 * compression.
		 */
		if (encryption != KERNELDUMP_ENC_NONE) {
			error = EOPNOTSUPP;
			goto cleanup;
		}
		dumper.kdcomp = kerneldumpcomp_create(&dumper, compression);
		if (dumper.kdcomp == NULL) {
			error = EINVAL;
			goto cleanup;
		}
	}

	dumper.blockbuf = malloc(di->blocksize, M_DUMPER, M_WAITOK | M_ZERO);
	return (0);
cleanup:
#ifdef EKCD
	if (dumper.kdcrypto != NULL) {
		explicit_bzero(dumper.kdcrypto, sizeof(*dumper.kdcrypto) +
		    dumper.kdcrypto->kdc_dumpkeysize);
		free(dumper.kdcrypto, M_EKCD);
	}
#endif

	kerneldumpcomp_destroy(&dumper);

	if (dumper.blockbuf != NULL) {
		explicit_bzero(dumper.blockbuf, dumper.blocksize);
		free(dumper.blockbuf, M_DUMPER);
	}
	explicit_bzero(&dumper, sizeof(dumper));
	dumpdevname[0] = '\0';
	return (error);
}

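/*
 * Sanity-check a dump write request: it must lie within the dump device and
 * be aligned to the device block size.
 */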
static int
dump_check_bounds(struct dumperinfo *di, off_t offset, size_t length)
{

	if (length != 0 && (offset < di->mediaoffset ||
	    offset - di->mediaoffset + length > di->mediasize)) {
		if (di->kdcomp != NULL && offset >= di->mediaoffset) {
			printf(
		    "Compressed dump failed to fit in device boundaries.\n");
			return (E2BIG);
		}

		printf("Attempt to write outside dump device boundaries.\n"
	    "offset(%jd), mediaoffset(%jd), length(%ju), mediasize(%jd).\n",
		    (intmax_t)offset, (intmax_t)di->mediaoffset,
		    (uintmax_t)length, (intmax_t)di->mediasize);
		return (ENOSPC);
	}
	if (length % di->blocksize != 0) {
		printf("Attempt to write partial block of length %ju.\n",
		    (uintmax_t)length);
		return (EINVAL);
	}
	if (offset % di->blocksize != 0) {
		printf("Attempt to write at unaligned offset %jd.\n",
		    (intmax_t)offset);
		return (EINVAL);
	}

	return (0);
}

#ifdef EKCD
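/*
 * Encrypt a buffer in place.  The last ciphertext block is carried over as
 * the IV for the next call, so consecutive writes form a single CBC stream.
 */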
static int
dump_encrypt(struct kerneldumpcrypto *kdc, uint8_t *buf, size_t size)
{

	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_blockEncrypt(&kdc->kdc_ci, &kdc->kdc_ki, buf,
		    8 * size, buf) <= 0) {
			return (EIO);
		}
		if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
		    buf + size - 16 /* IV size for AES-256-CBC */) <= 0) {
			return (EIO);
		}
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

/* Encrypt data and call dumper. */
static int
dump_encrypted_write(struct dumperinfo *di, void *virtual,
    vm_offset_t physical, off_t offset, size_t length)
{
	static uint8_t buf[KERNELDUMP_BUFFER_SIZE];
	struct kerneldumpcrypto *kdc;
	int error;
	size_t nbytes;

	kdc = di->kdcrypto;

	while (length > 0) {
		nbytes = MIN(length, sizeof(buf));
		bcopy(virtual, buf, nbytes);

		if (dump_encrypt(kdc, buf, nbytes) != 0)
			return (EIO);

		error = dump_write(di, buf, physical, offset, nbytes);
		if (error != 0)
			return (error);

		offset += nbytes;
		virtual = (void *)((uint8_t *)virtual + nbytes);
		length -= nbytes;
	}

	return (0);
}

static int
dump_write_key(struct dumperinfo *di, off_t offset)
{
	struct kerneldumpcrypto *kdc;

	kdc = di->kdcrypto;
	if (kdc == NULL)
		return (0);
	return (dump_write(di, kdc->kdc_dumpkey, 0, offset,
	    kdc->kdc_dumpkeysize));
}
#endif /* EKCD */

static int
kerneldumpcomp_write_cb(void *base, size_t length, off_t offset, void *arg)
{
	struct dumperinfo *di;
	size_t resid, rlength;
	int error;

	di = arg;

	if (length % di->blocksize != 0) {
		/*
		 * This must be the final write after flushing the compression
		 * stream. Write as many full blocks as possible and stash the
		 * residual data in the dumper's block buffer. It will be
		 * padded and written in dump_finish().
		 */
		rlength = rounddown(length, di->blocksize);
		if (rlength != 0) {
			error = _dump_append(di, base, 0, rlength);
			if (error != 0)
				return (error);
		}
		resid = length - rlength;
		memmove(di->blockbuf, (uint8_t *)base + rlength, resid);
		di->kdcomp->kdc_resid = resid;
		return (EAGAIN);
	}
	return (_dump_append(di, base, 0, length));
}

/*
 * Write a kerneldumpheader at the specified offset. The header structure is 512
 * bytes in size, but we must pad to the device sector size.
 */
static int
dump_write_header(struct dumperinfo *di, struct kerneldumpheader *kdh,
    off_t offset)
{
	void *buf;
	size_t hdrsz;

	hdrsz = sizeof(*kdh);
	if (hdrsz > di->blocksize)
		return (ENOMEM);

	if (hdrsz == di->blocksize)
		buf = kdh;
	else {
		buf = di->blockbuf;
		memset(buf, 0, di->blocksize);
		memcpy(buf, kdh, hdrsz);
	}

	return (dump_write(di, buf, 0, offset, di->blocksize));
}

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This is to
 * protect us from metadata and metadata from us.
 */
#define	SIZEOF_METADATA		(64 * 1024)

/*
 * Do some preliminary setup for a kernel dump: initialize state for encryption,
 * if requested, and make sure that we have enough space on the dump device.
 *
 * We set things up so that the dump ends before the last sector of the dump
 * device, at which the trailing header is written.
 *
 *     +-----------+------+-----+----------------------------+------+
 *     |           | lhdr | key |    ... kernel dump ...     | thdr |
 *     +-----------+------+-----+----------------------------+------+
 *                   1 blk  opt <------- dump extent --------> 1 blk
 *
 * Dumps written using dump_append() start at the beginning of the extent.
 * Uncompressed dumps will use the entire extent, but compressed dumps typically
 * will not. The true length of the dump is recorded in the leading and trailing
 * headers once the dump has been completed.
 */
int
dump_start(struct dumperinfo *di, struct kerneldumpheader *kdh)
{
	uint64_t dumpextent;
	uint32_t keysize;

#ifdef EKCD
	int error = kerneldumpcrypto_init(di->kdcrypto);
	if (error != 0)
		return (error);
	keysize = kerneldumpcrypto_dumpkeysize(di->kdcrypto);
#else
	keysize = 0;
#endif

	dumpextent = dtoh64(kdh->dumpextent);
	if (di->mediasize < SIZEOF_METADATA + dumpextent + 2 * di->blocksize +
	    keysize) {
		if (di->kdcomp != NULL) {
			/*
			 * We don't yet know how much space the compressed dump
			 * will occupy, so try to use the whole swap partition
			 * (minus the first 64KB) in the hope that the
			 * compressed dump will fit. If that doesn't turn out to
			 * be enough, the bounds checking in dump_write()
			 * will catch us and cause the dump to fail.
			 */
			dumpextent = di->mediasize - SIZEOF_METADATA -
			    2 * di->blocksize - keysize;
			kdh->dumpextent = htod64(dumpextent);
		} else
			return (E2BIG);
	}

	/* The offset at which to begin writing the dump. */
	di->dumpoff = di->mediaoffset + di->mediasize - di->blocksize -
	    dumpextent;

	return (0);
}

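/*
 * Write a chunk of data at the current dump offset, encrypting it first if
 * an encryption context is configured, and advance the offset on success.
 */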
static int
_dump_append(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    size_t length)
{
	int error;

#ifdef EKCD
	if (di->kdcrypto != NULL)
		error = dump_encrypted_write(di, virtual, physical, di->dumpoff,
		    length);
	else
#endif
		error = dump_write(di, virtual, physical, di->dumpoff, length);
	if (error == 0)
		di->dumpoff += length;
	return (error);
}

/*
 * Write to the dump device starting at dumpoff. When compression is enabled,
 * writes to the device will be performed using a callback that gets invoked
 * when the compression stream's output buffer is full.
 */
int
dump_append(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    size_t length)
{
	void *buf;

	if (di->kdcomp != NULL) {
		/* Bounce through a buffer to avoid CRC errors. */
		if (length > di->maxiosize)
			return (EINVAL);
		buf = di->kdcomp->kdc_buf;
		memmove(buf, virtual, length);
		return (compressor_write(di->kdcomp->kdc_stream, buf, length));
	}
	return (_dump_append(di, virtual, physical, length));
}

/*
 * Write to the dump device at the specified offset.
 */
int
dump_write(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    off_t offset, size_t length)
{
	int error;

	error = dump_check_bounds(di, offset, length);
	if (error != 0)
		return (error);
	return (di->dumper(di->priv, virtual, physical, offset, length));
}

/*
 * Perform kernel dump finalization: flush the compression stream, if necessary,
 * write the leading and trailing kernel dump headers now that we know the true
 * length of the dump, and optionally write the encryption key following the
 * leading header.
 */
int
dump_finish(struct dumperinfo *di, struct kerneldumpheader *kdh)
{
	uint64_t extent;
	uint32_t keysize;
	int error;

	extent = dtoh64(kdh->dumpextent);

#ifdef EKCD
	keysize = kerneldumpcrypto_dumpkeysize(di->kdcrypto);
#else
	keysize = 0;
#endif

	if (di->kdcomp != NULL) {
		error = compressor_flush(di->kdcomp->kdc_stream);
		if (error == EAGAIN) {
			/* We have residual data in di->blockbuf. */
			error = dump_write(di, di->blockbuf, 0, di->dumpoff,
			    di->blocksize);
			di->dumpoff += di->kdcomp->kdc_resid;
			di->kdcomp->kdc_resid = 0;
		}
		if (error != 0)
			return (error);

		/*
		 * We now know the size of the compressed dump, so update the
		 * header accordingly and recompute parity.
		 */
		kdh->dumplength = htod64(di->dumpoff -
		    (di->mediaoffset + di->mediasize - di->blocksize - extent));
		kdh->parity = 0;
		kdh->parity = kerneldump_parity(kdh);

		compressor_reset(di->kdcomp->kdc_stream);
	}

	/*
	 * Write kerneldump headers at the beginning and end of the dump extent.
	 * Write the key after the leading header.
	 */
	error = dump_write_header(di, kdh,
	    di->mediaoffset + di->mediasize - 2 * di->blocksize - extent -
	    keysize);
	if (error != 0)
		return (error);

#ifdef EKCD
	error = dump_write_key(di,
	    di->mediaoffset + di->mediasize - di->blocksize - extent - keysize);
	if (error != 0)
		return (error);
#endif

	error = dump_write_header(di, kdh,
	    di->mediaoffset + di->mediasize - di->blocksize);
	if (error != 0)
		return (error);

	(void)dump_write(di, NULL, 0, 0, 0);
	return (0);
}

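/*
 * Fill in the common fields of a kernel dump header: magic, architecture,
 * dump length, timestamp, hostname, kernel version string and, if set, the
 * panic string, and finally compute the header parity.
 */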
void
dump_init_header(const struct dumperinfo *di, struct kerneldumpheader *kdh,
    char *magic, uint32_t archver, uint64_t dumplen)
{
	size_t dstsize;

	bzero(kdh, sizeof(*kdh));
	strlcpy(kdh->magic, magic, sizeof(kdh->magic));
	strlcpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
	kdh->version = htod32(KERNELDUMPVERSION);
	kdh->architectureversion = htod32(archver);
	kdh->dumplength = htod64(dumplen);
	kdh->dumpextent = kdh->dumplength;
	kdh->dumptime = htod64(time_second);
#ifdef EKCD
	kdh->dumpkeysize = htod32(kerneldumpcrypto_dumpkeysize(di->kdcrypto));
#else
	kdh->dumpkeysize = 0;
#endif
	kdh->blocksize = htod32(di->blocksize);
	strlcpy(kdh->hostname, prison0.pr_hostname, sizeof(kdh->hostname));
	dstsize = sizeof(kdh->versionstring);
	if (strlcpy(kdh->versionstring, version, dstsize) >= dstsize)
		kdh->versionstring[dstsize - 2] = '\n';
	if (panicstr != NULL)
		strlcpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
	if (di->kdcomp != NULL)
		kdh->compression = di->kdcomp->kdc_format;
	kdh->parity = kerneldump_parity(kdh);
}

#ifdef DDB
DB_SHOW_COMMAND(panic, db_show_panic)
{

	if (panicstr == NULL)
		db_printf("panicstr not set\n");
	else
		db_printf("panic: %s\n", panicstr);
}
#endif