/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ekcd.h"
#include "opt_kdb.h"
#include "opt_panic.h"
#include "opt_sched.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/compressor.h>
#include <sys/cons.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/dump.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/signalvar.h>

static MALLOC_DEFINE(M_DUMPER, "dumper", "dumper block buffer");

#ifndef PANIC_REBOOT_WAIT_TIME
#define	PANIC_REBOOT_WAIT_TIME	15	/* default to 15 seconds */
#endif
static int panic_reboot_wait_time = PANIC_REBOOT_WAIT_TIME;
SYSCTL_INT(_kern, OID_AUTO, panic_reboot_wait_time, CTLFLAG_RWTUN,
    &panic_reboot_wait_time, 0,
    "Seconds to wait before rebooting after a panic");
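/*
 * Example (illustrative; the value 30 is arbitrary): since the sysctl above
 * carries CTLFLAG_RWTUN, it can be set both as a loader tunable and at run
 * time:
 *
 *	kern.panic_reboot_wait_time=30		# /boot/loader.conf
 *	sysctl kern.panic_reboot_wait_time=30	# running system
 *
 * A value of 0 reboots immediately after the crash dump, while -1 waits for
 * a key press instead; see shutdown_panic() below.
 */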
/*
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compilers.
 */
#include <machine/stdarg.h>

#ifdef KDB
#ifdef KDB_UNATTENDED
int debugger_on_panic = 0;
#else
int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic,
    CTLFLAG_RWTUN | CTLFLAG_SECURE,
    &debugger_on_panic, 0, "Run debugger on kernel panic");

#ifdef KDB_TRACE
static int trace_on_panic = 1;
#else
static int trace_on_panic = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic,
    CTLFLAG_RWTUN | CTLFLAG_SECURE,
    &trace_on_panic, 0, "Print stack trace on kernel panic");
#endif /* KDB */

static int sync_on_panic = 0;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RWTUN,
    &sync_on_panic, 0, "Do a sync before rebooting from a panic");

static bool poweroff_on_panic = 0;
SYSCTL_BOOL(_kern, OID_AUTO, poweroff_on_panic, CTLFLAG_RWTUN,
    &poweroff_on_panic, 0, "Do a power off instead of a reboot on a panic");

static bool powercycle_on_panic = 0;
SYSCTL_BOOL(_kern, OID_AUTO, powercycle_on_panic, CTLFLAG_RWTUN,
    &powercycle_on_panic, 0, "Do a power cycle instead of a reboot on a panic");

static SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0,
    "Shutdown environment");

#ifndef DIAGNOSTIC
static int show_busybufs;
#else
static int show_busybufs = 1;
#endif
SYSCTL_INT(_kern_shutdown, OID_AUTO, show_busybufs, CTLFLAG_RW,
    &show_busybufs, 0, "");

int suspend_blocked = 0;
SYSCTL_INT(_kern, OID_AUTO, suspend_blocked, CTLFLAG_RW,
    &suspend_blocked, 0, "Block suspend due to a pending shutdown");

#ifdef EKCD
FEATURE(ekcd, "Encrypted kernel crash dumps support");

MALLOC_DEFINE(M_EKCD, "ekcd", "Encrypted kernel crash dumps data");

struct kerneldumpcrypto {
	uint8_t			kdc_encryption;
	uint8_t			kdc_iv[KERNELDUMP_IV_MAX_SIZE];
	keyInstance		kdc_ki;
	cipherInstance		kdc_ci;
	uint32_t		kdc_dumpkeysize;
	struct kerneldumpkey	kdc_dumpkey[];
};
#endif

struct kerneldumpcomp {
	struct compressor	*kdc_stream;
	uint8_t			*kdc_buf;
	size_t			kdc_resid;
};

static struct kerneldumpcomp *kerneldumpcomp_create(struct dumperinfo *di,
    uint8_t compression);
static void kerneldumpcomp_destroy(struct dumperinfo *di);
static int kerneldumpcomp_write_cb(void *base, size_t len, off_t off, void *arg);

static int kerneldump_gzlevel = 6;
SYSCTL_INT(_kern, OID_AUTO, kerneldump_gzlevel, CTLFLAG_RWTUN,
    &kerneldump_gzlevel, 0,
    "Kernel crash dump compression level");

/*
 * Variable panicstr contains argument to first call to panic; used as flag
 * to indicate that the kernel has already called panic.
 */
const char *panicstr;

int dumping;				/* system is dumping */
int rebooting;				/* system is rebooting */
static struct dumperinfo dumper;	/* our selected dumper */

/* Context information for dump-debuggers. */
static struct pcb dumppcb;		/* Registers. */
lwpid_t dumptid;			/* Thread ID. */
static struct cdevsw reroot_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "reroot",
};

static void poweroff_wait(void *, int);
static void shutdown_halt(void *junk, int howto);
static void shutdown_panic(void *junk, int howto);
static void shutdown_reset(void *junk, int howto);
static int kern_reroot(void);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{

	EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL,
	    SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL,
	    SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL,
	    SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL,
	    SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_SUB_INTRINSIC, SI_ORDER_ANY, shutdown_conf, NULL);

/*
 * The only reason this exists is to create the /dev/reroot/ directory,
 * used by reroot code in init(8) as a mountpoint for tmpfs.
 */
static void
reroot_conf(void *unused)
{
	int error;
	struct cdev *cdev;

	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &cdev,
	    &reroot_cdevsw, NULL, UID_ROOT, GID_WHEEL, 0600, "reroot/reroot");
	if (error != 0) {
		printf("%s: failed to create device node, error %d\n",
		    __func__, error);
	}
}

SYSINIT(reroot_conf, SI_SUB_DEVFS, SI_ORDER_ANY, reroot_conf, NULL);

/*
 * The system call that results in a reboot.
 */
/* ARGSUSED */
int
sys_reboot(struct thread *td, struct reboot_args *uap)
{
	int error;

	error = 0;
#ifdef MAC
	error = mac_system_check_reboot(td->td_ucred, uap->opt);
#endif
	if (error == 0)
		error = priv_check(td, PRIV_REBOOT);
	if (error == 0) {
		if (uap->opt & RB_REROOT) {
			error = kern_reroot();
		} else {
			mtx_lock(&Giant);
			kern_reboot(uap->opt);
			mtx_unlock(&Giant);
		}
	}
	return (error);
}
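/*
 * Usage sketch (illustrative only): a sufficiently privileged userland
 * process reaches sys_reboot() through the reboot(2) system call, for
 * example either of:
 *
 *	#include <unistd.h>
 *	#include <sys/reboot.h>
 *
 *	reboot(RB_POWEROFF);	// power off instead of rebooting
 *	reboot(RB_REROOT);	// take the kern_reroot() path below
 *
 * reboot(8), shutdown(8) and init(8) are the usual callers.
 */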
/*
 * Called by events that want to shut down, e.g., <CTL><ALT><DEL> on a PC.
 */
void
shutdown_nice(int howto)
{

	if (initproc != NULL) {
		/* Send a signal to init(8) and have it shutdown the world. */
		PROC_LOCK(initproc);
		if (howto & RB_POWEROFF)
			kern_psignal(initproc, SIGUSR2);
		else if (howto & RB_POWERCYCLE)
			kern_psignal(initproc, SIGWINCH);
		else if (howto & RB_HALT)
			kern_psignal(initproc, SIGUSR1);
		else
			kern_psignal(initproc, SIGINT);
		PROC_UNLOCK(initproc);
	} else {
		/* No init(8) running, so simply reboot. */
		kern_reboot(howto | RB_NOSYNC);
	}
}

static void
print_uptime(void)
{
	int f;
	struct timespec ts;

	getnanouptime(&ts);
	printf("Uptime: ");
	f = 0;
	if (ts.tv_sec >= 86400) {
		printf("%ldd", (long)ts.tv_sec / 86400);
		ts.tv_sec %= 86400;
		f = 1;
	}
	if (f || ts.tv_sec >= 3600) {
		printf("%ldh", (long)ts.tv_sec / 3600);
		ts.tv_sec %= 3600;
		f = 1;
	}
	if (f || ts.tv_sec >= 60) {
		printf("%ldm", (long)ts.tv_sec / 60);
		ts.tv_sec %= 60;
		f = 1;
	}
	printf("%lds\n", (long)ts.tv_sec);
}

int
doadump(boolean_t textdump)
{
	boolean_t coredump;
	int error;

	error = 0;
	if (dumping)
		return (EBUSY);
	if (dumper.dumper == NULL)
		return (ENXIO);

	savectx(&dumppcb);
	dumptid = curthread->td_tid;
	dumping++;

	coredump = TRUE;
#ifdef DDB
	if (textdump && textdump_pending) {
		coredump = FALSE;
		textdump_dumpsys(&dumper);
	}
#endif
	if (coredump)
		error = dumpsys(&dumper);

	dumping--;
	return (error);
}

/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int once = 0;

#if defined(SMP)
	/*
	 * Bind us to the first CPU so that all shutdown code runs there.  Some
	 * systems don't shutdown properly (i.e., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, CPU_FIRST());
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == CPU_FIRST(),
		    ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && once == 0) {
		once = 1;
		bufshutdown(show_busybufs);
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping)
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
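/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * subsystems hook into the shutdown sequence driven by kern_reboot() above
 * via EVENTHANDLER_REGISTER().  For example, a driver that must quiesce its
 * hardware before the machine is reset might register:
 *
 *	static void
 *	mydrv_shutdown_final(void *arg, int howto)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		mydrv_quiesce(sc);
 *	}
 *
 *	EVENTHANDLER_REGISTER(shutdown_final, mydrv_shutdown_final, sc,
 *	    SHUTDOWN_PRI_DEFAULT);
 *
 * Handlers run in priority order; see shutdown_conf() above for the
 * priorities used by this file's own handlers.
 */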
/*
 * The system call that results in changing the rootfs.
 */
static int
kern_reroot(void)
{
	struct vnode *oldrootvnode, *vp;
	struct mount *mp, *devmp;
	int error;

	if (curproc != initproc)
		return (EPERM);

	/*
	 * Mark the filesystem containing currently-running executable
	 * (the temporary copy of init(8)) busy.
	 */
	vp = curproc->p_textvp;
	error = vn_lock(vp, LK_SHARED);
	if (error != 0)
		return (error);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		vfs_rel(mp);
		if (error != 0) {
			VOP_UNLOCK(vp, 0);
			return (ENOENT);
		}
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);

	/*
	 * Remove the filesystem containing currently-running executable
	 * from the mount list, to prevent it from being unmounted
	 * by vfs_unmountall(), and to avoid confusing vfs_mountroot().
	 *
	 * Also preserve /dev - forcibly unmounting it could cause driver
	 * reinitialization.
	 */
	vfs_ref(rootdevmp);
	devmp = rootdevmp;
	rootdevmp = NULL;

	mtx_lock(&mountlist_mtx);
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	TAILQ_REMOVE(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);

	oldrootvnode = rootvnode;

	/*
	 * Unmount everything except for the two filesystems preserved above.
	 */
	vfs_unmountall();

	/*
	 * Add /dev back; vfs_mountroot() will move it into its new place.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_HEAD(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	rootdevmp = devmp;
	vfs_rel(rootdevmp);

	/*
	 * Mount the new rootfs.
	 */
	vfs_mountroot();

	/*
	 * Update all references to the old rootvnode.
	 */
	mountcheckdirs(oldrootvnode, rootvnode);

	/*
	 * Add the temporary filesystem back and unbusy it.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_unbusy(mp);

	return (0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		switch (cngetc()) {
		case -1:		/* No console, just die */
			cpu_halt();
			/* NOTREACHED */
		default:
			break;
		}
	}
}
/*
 * Check to see if the system panicked, and if so pause and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
	int loop;

	if (howto & RB_DUMP) {
		if (panic_reboot_wait_time != 0) {
			if (panic_reboot_wait_time != -1) {
				printf("Automatic reboot in %d seconds - "
				    "press a key on the console to abort\n",
				    panic_reboot_wait_time);
				for (loop = panic_reboot_wait_time * 10;
				    loop > 0; --loop) {
					DELAY(1000 * 100); /* 1/10th second */
					/* Did user type a key? */
					if (cncheckc() != -1)
						break;
				}
				if (!loop)
					return;
			}
		} else { /* zero time specified - reboot NOW */
			return;
		}
		printf("--> Press a key on the console to reboot,\n");
		printf("--> or switch off the system now.\n");
		cngetc();
	}
}

/*
 * Everything done, now reset
 */
static void
shutdown_reset(void *junk, int howto)
{

	printf("Rebooting...\n");
	DELAY(1000000);	/* wait 1 sec for printf's to complete and be read */

	/*
	 * Acquiring smp_ipi_mtx here has a double effect:
	 * - it disables interrupts avoiding CPU0 preemption
	 *   by fast handlers (thus deadlocking against other CPUs)
	 * - it avoids deadlocks against smp_rendezvous() or, more
	 *   generally, threads busy-waiting, with this spinlock held,
	 *   and waiting for responses by threads on other CPUs
	 *   (i.e., smp_tlb_shootdown()).
	 *
	 * For the !SMP case it just needs to handle the former problem.
	 */
#ifdef SMP
	mtx_lock_spin(&smp_ipi_mtx);
#else
	spinlock_enter();
#endif

	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
	cpu_reset();
	/* NOTREACHED */ /* assuming reset worked */
}

#if defined(WITNESS) || defined(INVARIANT_SUPPORT)
static int kassert_warn_only = 0;
#ifdef KDB
static int kassert_do_kdb = 0;
#endif
#ifdef KTR
static int kassert_do_ktr = 0;
#endif
static int kassert_do_log = 1;
static int kassert_log_pps_limit = 4;
static int kassert_log_mute_at = 0;
static int kassert_log_panic_at = 0;
static int kassert_warnings = 0;

SYSCTL_NODE(_debug, OID_AUTO, kassert, CTLFLAG_RW, NULL, "kassert options");

SYSCTL_INT(_debug_kassert, OID_AUTO, warn_only, CTLFLAG_RWTUN,
    &kassert_warn_only, 0,
    "KASSERT triggers a panic (1) or just a warning (0)");

#ifdef KDB
SYSCTL_INT(_debug_kassert, OID_AUTO, do_kdb, CTLFLAG_RWTUN,
    &kassert_do_kdb, 0, "KASSERT will enter the debugger");
#endif

#ifdef KTR
SYSCTL_UINT(_debug_kassert, OID_AUTO, do_ktr, CTLFLAG_RWTUN,
    &kassert_do_ktr, 0,
    "KASSERT does a KTR, set this to the KTRMASK you want");
#endif

SYSCTL_INT(_debug_kassert, OID_AUTO, do_log, CTLFLAG_RWTUN,
    &kassert_do_log, 0,
    "Log (1) or do not log (0) KASSERT failures when warn_only is set");

SYSCTL_INT(_debug_kassert, OID_AUTO, warnings, CTLFLAG_RWTUN,
    &kassert_warnings, 0, "number of KASSERTs that have been triggered");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_panic_at, CTLFLAG_RWTUN,
    &kassert_log_panic_at, 0, "max number of KASSERTS before we will panic");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_pps_limit, CTLFLAG_RWTUN,
    &kassert_log_pps_limit, 0, "limit number of log messages per second");

SYSCTL_INT(_debug_kassert, OID_AUTO, log_mute_at, CTLFLAG_RWTUN,
    &kassert_log_mute_at, 0, "max number of KASSERTS to log");

static int kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug_kassert, OID_AUTO, kassert,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE, NULL, 0,
    kassert_sysctl_kassert, "I", "set to trigger a test kassert");

static int
kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	KASSERT(0, ("kassert_sysctl_kassert triggered kassert %d", i));

	return (0);
}
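/*
 * Example (illustrative): on a kernel built with WITNESS or
 * INVARIANT_SUPPORT, the handler above lets a test assertion be fired from
 * userland:
 *
 *	sysctl debug.kassert.kassert=1
 *
 * Whether this panics or only logs a warning is controlled by
 * debug.kassert.warn_only and the other knobs declared above.
 */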
/*
 * Called by KASSERT, this decides if we will panic
 * or if we will log via printf and/or ktr.
 */
void
kassert_panic(const char *fmt, ...)
{
	static char buf[256];
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	/*
	 * panic if we're not just warning, or if we've exceeded
	 * kassert_log_panic_at warnings.
	 */
	if (!kassert_warn_only ||
	    (kassert_log_panic_at > 0 &&
	    kassert_warnings >= kassert_log_panic_at)) {
		va_start(ap, fmt);
		vpanic(fmt, ap);
		/* NORETURN */
	}
#ifdef KTR
	if (kassert_do_ktr)
		CTR0(ktr_mask, buf);
#endif /* KTR */
	/*
	 * log if we've not yet met the mute limit.
	 */
	if (kassert_do_log &&
	    (kassert_log_mute_at == 0 ||
	    kassert_warnings < kassert_log_mute_at)) {
		static struct timeval lasterr;
		static int curerr;

		if (ppsratecheck(&lasterr, &curerr, kassert_log_pps_limit)) {
			printf("KASSERT failed: %s\n", buf);
			kdb_backtrace();
		}
	}
#ifdef KDB
	if (kassert_do_kdb) {
		kdb_enter(KDB_WHY_KASSERT, buf);
	}
#endif
	atomic_add_int(&kassert_warnings, 1);
}
#endif

/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vpanic(fmt, ap);
}

void
vpanic(const char *fmt, va_list ap)
{
#ifdef SMP
	cpuset_t other_cpus;
#endif
	struct thread *td = curthread;
	int bootopt, newpanic;
	static char buf[256];

	spinlock_enter();

#ifdef SMP
	/*
	 * stop_cpus_hard(other_cpus) should prevent multiple CPUs from
	 * concurrently entering panic.  Only the winner will proceed
	 * further.
	 */
	if (panicstr == NULL && !kdb_active) {
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
		stop_cpus_hard(other_cpus);
	}
#endif

	/*
	 * Ensure that the scheduler is stopped while panicking, even if panic
	 * has been entered from kdb.
	 */
	td->td_stopsched = 1;

	bootopt = RB_AUTOBOOT;
	newpanic = 0;
	if (panicstr)
		bootopt |= RB_NOSYNC;
	else {
		bootopt |= RB_DUMP;
		panicstr = fmt;
		newpanic = 1;
	}

	if (newpanic) {
		(void)vsnprintf(buf, sizeof(buf), fmt, ap);
		panicstr = buf;
		cngrab();
		printf("panic: %s\n", buf);
	} else {
		printf("panic: ");
		vprintf(fmt, ap);
		printf("\n");
	}
#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif
	printf("time = %jd\n", (intmax_t)time_second);
#ifdef KDB
	if (newpanic && trace_on_panic)
		kdb_backtrace();
	if (debugger_on_panic)
		kdb_enter(KDB_WHY_PANIC, "panic");
#endif
	/* thread_lock(td); */
	td->td_flags |= TDF_INPANIC;
	/* thread_unlock(td); */
	if (!sync_on_panic)
		bootopt |= RB_NOSYNC;
	if (poweroff_on_panic)
		bootopt |= RB_POWEROFF;
	if (powercycle_on_panic)
		bootopt |= RB_POWERCYCLE;
	kern_reboot(bootopt);
}
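/*
 * Usage reminder (illustrative): kernel code normally reaches vpanic()
 * through the panic(9) and KASSERT(9) interfaces rather than calling it
 * directly, e.g.:
 *
 *	KASSERT(m != NULL, ("%s: NULL mbuf", __func__));
 *	panic("%s: unrecoverable state %d", __func__, state);
 */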
/*
 * Support for poweroff delay.
 *
 * Please note that setting this delay too short might power off your machine
 * before the write cache on your hard disk has been flushed, leading to
 * soft-updates inconsistencies.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
    &poweroff_delay, 0, "Delay before poweroff to write disk caches (msec)");

static void
poweroff_wait(void *junk, int howto)
{

	if ((howto & (RB_POWEROFF | RB_POWERCYCLE)) == 0 || poweroff_delay <= 0)
		return;
	DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * kproc_shutdown() as a handler for one of the shutdown events.
 */
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
    &kproc_shutdown_wait, 0, "Max wait time (sec) to stop for each process");

void
kproc_shutdown(void *arg, int howto)
{
	struct proc *p;
	int error;

	if (panicstr)
		return;

	p = (struct proc *)arg;
	printf("Waiting (max %d seconds) for system process `%s' to stop... ",
	    kproc_shutdown_wait, p->p_comm);
	error = kproc_suspend(p, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		printf("timed out\n");
	else
		printf("done\n");
}

void
kthread_shutdown(void *arg, int howto)
{
	struct thread *td;
	int error;

	if (panicstr)
		return;

	td = (struct thread *)arg;
	printf("Waiting (max %d seconds) for system thread `%s' to stop... ",
	    kproc_shutdown_wait, td->td_name);
	error = kthread_suspend(td, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		printf("timed out\n");
	else
		printf("done\n");
}

static char dumpdevname[sizeof(((struct cdev*)NULL)->si_name)];
SYSCTL_STRING(_kern_shutdown, OID_AUTO, dumpdevname, CTLFLAG_RD,
    dumpdevname, 0, "Device for kernel dumps");

static int _dump_append(struct dumperinfo *di, void *virtual,
    vm_offset_t physical, size_t length);

#ifdef EKCD
static struct kerneldumpcrypto *
kerneldumpcrypto_create(size_t blocksize, uint8_t encryption,
    const uint8_t *key, uint32_t encryptedkeysize, const uint8_t *encryptedkey)
{
	struct kerneldumpcrypto *kdc;
	struct kerneldumpkey *kdk;
	uint32_t dumpkeysize;

	dumpkeysize = roundup2(sizeof(*kdk) + encryptedkeysize, blocksize);
	kdc = malloc(sizeof(*kdc) + dumpkeysize, M_EKCD, M_WAITOK | M_ZERO);

	arc4rand(kdc->kdc_iv, sizeof(kdc->kdc_iv), 0);

	kdc->kdc_encryption = encryption;
	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_makeKey(&kdc->kdc_ki, DIR_ENCRYPT, 256, key) <= 0)
			goto failed;
		break;
	default:
		goto failed;
	}

	kdc->kdc_dumpkeysize = dumpkeysize;
	kdk = kdc->kdc_dumpkey;
	kdk->kdk_encryption = kdc->kdc_encryption;
	memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
	kdk->kdk_encryptedkeysize = htod32(encryptedkeysize);
	memcpy(kdk->kdk_encryptedkey, encryptedkey, encryptedkeysize);

	return (kdc);
failed:
	explicit_bzero(kdc, sizeof(*kdc) + dumpkeysize);
	free(kdc, M_EKCD);
	return (NULL);
}
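/*
 * Note (editorial assumption, not from the original sources): the
 * encryptedkey blob passed in above is produced by userland, typically
 * dumpon(8), and is stored verbatim in the dump via dump_write_key() so
 * that the symmetric dump key can be recovered later, e.g. by
 * decryptcore(8).
 */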
static int
kerneldumpcrypto_init(struct kerneldumpcrypto *kdc)
{
	uint8_t hash[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;
	struct kerneldumpkey *kdk;
	int error;

	error = 0;

	if (kdc == NULL)
		return (0);

	/*
	 * When a user enters ddb it can write a crash dump multiple times.
	 * Each time it should be encrypted using a different IV.
	 */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, kdc->kdc_iv, sizeof(kdc->kdc_iv));
	SHA256_Final(hash, &ctx);
	bcopy(hash, kdc->kdc_iv, sizeof(kdc->kdc_iv));

	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
		    kdc->kdc_iv) <= 0) {
			error = EINVAL;
			goto out;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}

	kdk = kdc->kdc_dumpkey;
	memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
out:
	explicit_bzero(hash, sizeof(hash));
	return (error);
}

static uint32_t
kerneldumpcrypto_dumpkeysize(const struct kerneldumpcrypto *kdc)
{

	if (kdc == NULL)
		return (0);
	return (kdc->kdc_dumpkeysize);
}
#endif /* EKCD */

static struct kerneldumpcomp *
kerneldumpcomp_create(struct dumperinfo *di, uint8_t compression)
{
	struct kerneldumpcomp *kdcomp;

	if (compression != KERNELDUMP_COMP_GZIP)
		return (NULL);
	kdcomp = malloc(sizeof(*kdcomp), M_DUMPER, M_WAITOK | M_ZERO);
	kdcomp->kdc_stream = compressor_init(kerneldumpcomp_write_cb,
	    COMPRESS_GZIP, di->maxiosize, kerneldump_gzlevel, di);
	if (kdcomp->kdc_stream == NULL) {
		free(kdcomp, M_DUMPER);
		return (NULL);
	}
	kdcomp->kdc_buf = malloc(di->maxiosize, M_DUMPER, M_WAITOK | M_NODUMP);
	return (kdcomp);
}

static void
kerneldumpcomp_destroy(struct dumperinfo *di)
{
	struct kerneldumpcomp *kdcomp;

	kdcomp = di->kdcomp;
	if (kdcomp == NULL)
		return;
	compressor_fini(kdcomp->kdc_stream);
	explicit_bzero(kdcomp->kdc_buf, di->maxiosize);
	free(kdcomp->kdc_buf, M_DUMPER);
	free(kdcomp, M_DUMPER);
}
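/*
 * Note (editorial assumption): set_dumper() below is normally reached via
 * dumpon(8), which issues a DIOCSKERNELDUMP ioctl against the chosen dump
 * device; calling it with di == NULL tears the configuration down again.
 */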
/* Registration of dumpers */
int
set_dumper(struct dumperinfo *di, const char *devname, struct thread *td,
    uint8_t compression, uint8_t encryption, const uint8_t *key,
    uint32_t encryptedkeysize, const uint8_t *encryptedkey)
{
	size_t wantcopy;
	int error;

	error = priv_check(td, PRIV_SETDUMPER);
	if (error != 0)
		return (error);

	if (di == NULL) {
		error = 0;
		goto cleanup;
	}
	if (dumper.dumper != NULL)
		return (EBUSY);
	dumper = *di;
	dumper.blockbuf = NULL;
	dumper.kdcrypto = NULL;
	dumper.kdcomp = NULL;

	if (encryption != KERNELDUMP_ENC_NONE) {
#ifdef EKCD
		dumper.kdcrypto = kerneldumpcrypto_create(di->blocksize,
		    encryption, key, encryptedkeysize, encryptedkey);
		if (dumper.kdcrypto == NULL) {
			error = EINVAL;
			goto cleanup;
		}
#else
		error = EOPNOTSUPP;
		goto cleanup;
#endif
	}

	wantcopy = strlcpy(dumpdevname, devname, sizeof(dumpdevname));
	if (wantcopy >= sizeof(dumpdevname)) {
		printf("set_dumper: device name truncated from '%s' -> '%s'\n",
		    devname, dumpdevname);
	}

	if (compression != KERNELDUMP_COMP_NONE) {
		/*
		 * We currently can't support simultaneous encryption and
		 * compression.
		 */
		if (encryption != KERNELDUMP_ENC_NONE) {
			error = EOPNOTSUPP;
			goto cleanup;
		}
		dumper.kdcomp = kerneldumpcomp_create(&dumper, compression);
		if (dumper.kdcomp == NULL) {
			error = EINVAL;
			goto cleanup;
		}
	}

	dumper.blockbuf = malloc(di->blocksize, M_DUMPER, M_WAITOK | M_ZERO);
	return (0);
cleanup:
#ifdef EKCD
	if (dumper.kdcrypto != NULL) {
		explicit_bzero(dumper.kdcrypto, sizeof(*dumper.kdcrypto) +
		    dumper.kdcrypto->kdc_dumpkeysize);
		free(dumper.kdcrypto, M_EKCD);
	}
#endif

	kerneldumpcomp_destroy(&dumper);

	if (dumper.blockbuf != NULL) {
		explicit_bzero(dumper.blockbuf, dumper.blocksize);
		free(dumper.blockbuf, M_DUMPER);
	}
	explicit_bzero(&dumper, sizeof(dumper));
	dumpdevname[0] = '\0';
	return (error);
}

static int
dump_check_bounds(struct dumperinfo *di, off_t offset, size_t length)
{

	if (length != 0 && (offset < di->mediaoffset ||
	    offset - di->mediaoffset + length > di->mediasize)) {
		printf("Attempt to write outside dump device boundaries.\n"
		    "offset(%jd), mediaoffset(%jd), length(%ju), mediasize(%jd).\n",
		    (intmax_t)offset, (intmax_t)di->mediaoffset,
		    (uintmax_t)length, (intmax_t)di->mediasize);
		return (ENOSPC);
	}
	if (length % di->blocksize != 0) {
		printf("Attempt to write partial block of length %ju.\n",
		    (uintmax_t)length);
		return (EINVAL);
	}
	if (offset % di->blocksize != 0) {
		printf("Attempt to write at unaligned offset %jd.\n",
		    (intmax_t)offset);
		return (EINVAL);
	}

	return (0);
}

#ifdef EKCD
static int
dump_encrypt(struct kerneldumpcrypto *kdc, uint8_t *buf, size_t size)
{

	switch (kdc->kdc_encryption) {
	case KERNELDUMP_ENC_AES_256_CBC:
		if (rijndael_blockEncrypt(&kdc->kdc_ci, &kdc->kdc_ki, buf,
		    8 * size, buf) <= 0) {
			return (EIO);
		}
		if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
		    buf + size - 16 /* IV size for AES-256-CBC */) <= 0) {
			return (EIO);
		}
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
/* Encrypt data and call dumper. */
static int
dump_encrypted_write(struct dumperinfo *di, void *virtual,
    vm_offset_t physical, off_t offset, size_t length)
{
	static uint8_t buf[KERNELDUMP_BUFFER_SIZE];
	struct kerneldumpcrypto *kdc;
	int error;
	size_t nbytes;

	kdc = di->kdcrypto;

	while (length > 0) {
		nbytes = MIN(length, sizeof(buf));
		bcopy(virtual, buf, nbytes);

		if (dump_encrypt(kdc, buf, nbytes) != 0)
			return (EIO);

		error = dump_write(di, buf, physical, offset, nbytes);
		if (error != 0)
			return (error);

		offset += nbytes;
		virtual = (void *)((uint8_t *)virtual + nbytes);
		length -= nbytes;
	}

	return (0);
}

static int
dump_write_key(struct dumperinfo *di, off_t offset)
{
	struct kerneldumpcrypto *kdc;

	kdc = di->kdcrypto;
	if (kdc == NULL)
		return (0);
	return (dump_write(di, kdc->kdc_dumpkey, 0, offset,
	    kdc->kdc_dumpkeysize));
}
#endif /* EKCD */

static int
kerneldumpcomp_write_cb(void *base, size_t length, off_t offset, void *arg)
{
	struct dumperinfo *di;
	size_t resid, rlength;
	int error;

	di = arg;

	if (length % di->blocksize != 0) {
		/*
		 * This must be the final write after flushing the compression
		 * stream.  Write as many full blocks as possible and stash the
		 * residual data in the dumper's block buffer.  It will be
		 * padded and written in dump_finish().
		 */
		rlength = rounddown(length, di->blocksize);
		if (rlength != 0) {
			error = _dump_append(di, base, 0, rlength);
			if (error != 0)
				return (error);
		}
		resid = length - rlength;
		memmove(di->blockbuf, (uint8_t *)base + rlength, resid);
		di->kdcomp->kdc_resid = resid;
		return (EAGAIN);
	}
	return (_dump_append(di, base, 0, length));
}

/*
 * Write a kerneldumpheader at the specified offset.  The header structure is
 * 512 bytes in size, but we must pad to the device sector size.
 */
static int
dump_write_header(struct dumperinfo *di, struct kerneldumpheader *kdh,
    off_t offset)
{
	void *buf;
	size_t hdrsz;

	hdrsz = sizeof(*kdh);
	if (hdrsz > di->blocksize)
		return (ENOMEM);

	if (hdrsz == di->blocksize)
		buf = kdh;
	else {
		buf = di->blockbuf;
		memset(buf, 0, di->blocksize);
		memcpy(buf, kdh, hdrsz);
	}

	return (dump_write(di, buf, 0, offset, di->blocksize));
}

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device.  This is to
 * protect us from metadata and metadata from us.
 */
#define	SIZEOF_METADATA		(64 * 1024)
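/*
 * Worked example (illustrative figures, not from the original file): with
 * mediaoffset 0, a 1 GB dump device, a 512-byte block size, no encryption
 * key and a dump extent of 768 MB, dump_start() and dump_finish() below
 * place the pieces at:
 *
 *	leading header:		1 GB - 2*512 - 768 MB
 *	dump data:		1 GB -   512 - 768 MB  (dumpoff grows upward)
 *	trailing header:	1 GB -   512
 *
 * With encryption enabled, the key block sits between the leading header
 * and the dump data, pushing the leading header down by kdc_dumpkeysize
 * bytes.
 */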
/*
 * Do some preliminary setup for a kernel dump: initialize state for encryption,
 * if requested, and make sure that we have enough space on the dump device.
 *
 * We set things up so that the dump ends before the last sector of the dump
 * device, at which the trailing header is written.
 *
 *     +-----------+------+-----+----------------------------+------+
 *     |           | lhdr | key |    ... kernel dump ...     | thdr |
 *     +-----------+------+-----+----------------------------+------+
 *                   1 blk  opt <------- dump extent --------> 1 blk
 *
 * Dumps written using dump_append() start at the beginning of the extent.
 * Uncompressed dumps will use the entire extent, but compressed dumps typically
 * will not.  The true length of the dump is recorded in the leading and trailing
 * headers once the dump has been completed.
 */
int
dump_start(struct dumperinfo *di, struct kerneldumpheader *kdh)
{
	uint64_t dumpextent;
	uint32_t keysize;

#ifdef EKCD
	int error = kerneldumpcrypto_init(di->kdcrypto);
	if (error != 0)
		return (error);
	keysize = kerneldumpcrypto_dumpkeysize(di->kdcrypto);
#else
	keysize = 0;
#endif

	dumpextent = dtoh64(kdh->dumpextent);
	if (di->mediasize < SIZEOF_METADATA + dumpextent + 2 * di->blocksize +
	    keysize) {
		if (di->kdcomp != NULL) {
			/*
			 * We don't yet know how much space the compressed dump
			 * will occupy, so try to use the whole swap partition
			 * (minus the first 64KB) in the hope that the
			 * compressed dump will fit.  If that doesn't turn out
			 * to be enough, the bounds checking in dump_write()
			 * will catch us and cause the dump to fail.
			 */
			dumpextent = di->mediasize - SIZEOF_METADATA -
			    2 * di->blocksize - keysize;
			kdh->dumpextent = htod64(dumpextent);
		} else
			return (E2BIG);
	}

	/* The offset at which to begin writing the dump. */
	di->dumpoff = di->mediaoffset + di->mediasize - di->blocksize -
	    dumpextent;

	return (0);
}

static int
_dump_append(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    size_t length)
{
	int error;

#ifdef EKCD
	if (di->kdcrypto != NULL)
		error = dump_encrypted_write(di, virtual, physical, di->dumpoff,
		    length);
	else
#endif
		error = dump_write(di, virtual, physical, di->dumpoff, length);
	if (error == 0)
		di->dumpoff += length;
	return (error);
}

/*
 * Write to the dump device starting at dumpoff.  When compression is enabled,
 * writes to the device will be performed using a callback that gets invoked
 * when the compression stream's output buffer is full.
 */
int
dump_append(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    size_t length)
{
	void *buf;

	if (di->kdcomp != NULL) {
		/* Bounce through a buffer to avoid CRC errors. */
		if (length > di->maxiosize)
			return (EINVAL);
		buf = di->kdcomp->kdc_buf;
		memmove(buf, virtual, length);
		return (compressor_write(di->kdcomp->kdc_stream, buf, length));
	}
	return (_dump_append(di, virtual, physical, length));
}

/*
 * Write to the dump device at the specified offset.
 */
int
dump_write(struct dumperinfo *di, void *virtual, vm_offset_t physical,
    off_t offset, size_t length)
{
	int error;

	error = dump_check_bounds(di, offset, length);
	if (error != 0)
		return (error);
	return (di->dumper(di->priv, virtual, physical, offset, length));
}
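/*
 * Illustrative sketch (not from this file): a machine-dependent dump
 * routine drives the interfaces above, together with dump_init_header()
 * and dump_finish() below, roughly as follows:
 *
 *	dump_init_header(di, &kdh, magic, archver, dumplen);
 *	error = dump_start(di, &kdh);
 *	if (error == 0) {
 *		for (each chunk of memory to be saved)
 *			error = dump_append(di, va, pa, len);
 *		if (error == 0)
 *			error = dump_finish(di, &kdh);
 *	}
 *
 * where "magic", "va", "pa" and "len" stand in for the machine-dependent
 * values.
 */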
/*
 * Perform kernel dump finalization: flush the compression stream, if necessary,
 * write the leading and trailing kernel dump headers now that we know the true
 * length of the dump, and optionally write the encryption key following the
 * leading header.
 */
int
dump_finish(struct dumperinfo *di, struct kerneldumpheader *kdh)
{
	uint64_t extent;
	uint32_t keysize;
	int error;

	extent = dtoh64(kdh->dumpextent);

#ifdef EKCD
	keysize = kerneldumpcrypto_dumpkeysize(di->kdcrypto);
#else
	keysize = 0;
#endif

	if (di->kdcomp != NULL) {
		error = compressor_flush(di->kdcomp->kdc_stream);
		if (error == EAGAIN) {
			/* We have residual data in di->blockbuf. */
			error = dump_write(di, di->blockbuf, 0, di->dumpoff,
			    di->blocksize);
			di->dumpoff += di->kdcomp->kdc_resid;
			di->kdcomp->kdc_resid = 0;
		}
		if (error != 0)
			return (error);

		/*
		 * We now know the size of the compressed dump, so update the
		 * header accordingly and recompute parity.
		 */
		kdh->dumplength = htod64(di->dumpoff -
		    (di->mediaoffset + di->mediasize - di->blocksize - extent));
		kdh->parity = 0;
		kdh->parity = kerneldump_parity(kdh);

		compressor_reset(di->kdcomp->kdc_stream);
	}

	/*
	 * Write kerneldump headers at the beginning and end of the dump extent.
	 * Write the key after the leading header.
	 */
	error = dump_write_header(di, kdh,
	    di->mediaoffset + di->mediasize - 2 * di->blocksize - extent -
	    keysize);
	if (error != 0)
		return (error);

#ifdef EKCD
	error = dump_write_key(di,
	    di->mediaoffset + di->mediasize - di->blocksize - extent - keysize);
	if (error != 0)
		return (error);
#endif

	error = dump_write_header(di, kdh,
	    di->mediaoffset + di->mediasize - di->blocksize);
	if (error != 0)
		return (error);

	(void)dump_write(di, NULL, 0, 0, 0);
	return (0);
}

void
dump_init_header(const struct dumperinfo *di, struct kerneldumpheader *kdh,
    char *magic, uint32_t archver, uint64_t dumplen)
{
	size_t dstsize;

	bzero(kdh, sizeof(*kdh));
	strlcpy(kdh->magic, magic, sizeof(kdh->magic));
	strlcpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
	kdh->version = htod32(KERNELDUMPVERSION);
	kdh->architectureversion = htod32(archver);
	kdh->dumplength = htod64(dumplen);
	kdh->dumpextent = kdh->dumplength;
	kdh->dumptime = htod64(time_second);
#ifdef EKCD
	kdh->dumpkeysize = htod32(kerneldumpcrypto_dumpkeysize(di->kdcrypto));
#else
	kdh->dumpkeysize = 0;
#endif
	kdh->blocksize = htod32(di->blocksize);
	strlcpy(kdh->hostname, prison0.pr_hostname, sizeof(kdh->hostname));
	dstsize = sizeof(kdh->versionstring);
	if (strlcpy(kdh->versionstring, version, dstsize) >= dstsize)
		kdh->versionstring[dstsize - 2] = '\n';
	if (panicstr != NULL)
		strlcpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
	if (di->kdcomp != NULL)
		kdh->compression = KERNELDUMP_COMP_GZIP;
	kdh->parity = kerneldump_parity(kdh);
}

#ifdef DDB
DB_SHOW_COMMAND(panic, db_show_panic)
{

	if (panicstr == NULL)
		db_printf("panicstr not set\n");
	else
		db_printf("panic: %s\n", panicstr);
}
#endif