1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2016 Joyent, Inc. 25 */ 26 27 #include <sys/types.h> 28 #include <sys/param.h> 29 #include <sys/systm.h> 30 #include <sys/vm.h> 31 #include <sys/proc.h> 32 #include <sys/file.h> 33 #include <sys/conf.h> 34 #include <sys/kmem.h> 35 #include <sys/mem.h> 36 #include <sys/mman.h> 37 #include <sys/vnode.h> 38 #include <sys/errno.h> 39 #include <sys/memlist.h> 40 #include <sys/dumphdr.h> 41 #include <sys/dumpadm.h> 42 #include <sys/ksyms.h> 43 #include <sys/compress.h> 44 #include <sys/stream.h> 45 #include <sys/strsun.h> 46 #include <sys/cmn_err.h> 47 #include <sys/bitmap.h> 48 #include <sys/modctl.h> 49 #include <sys/utsname.h> 50 #include <sys/systeminfo.h> 51 #include <sys/vmem.h> 52 #include <sys/log.h> 53 #include <sys/var.h> 54 #include <sys/debug.h> 55 #include <sys/sunddi.h> 56 #include <fs/fs_subr.h> 57 #include <sys/fs/snode.h> 58 #include <sys/ontrap.h> 59 #include <sys/panic.h> 60 #include <sys/dkio.h> 61 #include <sys/vtoc.h> 62 #include <sys/errorq.h> 63 #include <sys/fm/util.h> 64 #include <sys/fs/zfs.h> 65 66 #include <vm/hat.h> 67 #include <vm/as.h> 68 #include <vm/page.h> 69 #include <vm/pvn.h> 70 #include <vm/seg.h> 71 #include <vm/seg_kmem.h> 72 #include <sys/clock_impl.h> 73 #include <sys/hold_page.h> 74 75 #include <bzip2/bzlib.h> 76 77 /* 78 * Crash dump time is dominated by disk write time. To reduce this, 79 * the stronger compression method bzip2 is applied to reduce the dump 80 * size and hence reduce I/O time. However, bzip2 is much more 81 * computationally expensive than the existing lzjb algorithm, so to 82 * avoid increasing compression time, CPUs that are otherwise idle 83 * during panic are employed to parallelize the compression task. 84 * Many helper CPUs are needed to prevent bzip2 from being a 85 * bottleneck, and on systems with too few CPUs, the lzjb algorithm is 86 * parallelized instead. Lastly, I/O and compression are performed by 87 * different CPUs, and are hence overlapped in time, unlike the older 88 * serial code. 89 * 90 * Another important consideration is the speed of the dump 91 * device. Faster disks need less CPUs in order to benefit from 92 * parallel lzjb versus parallel bzip2. Therefore, the CPU count 93 * threshold for switching from parallel lzjb to paralled bzip2 is 94 * elevated for faster disks. The dump device speed is adduced from 95 * the setting for dumpbuf.iosize, see dump_update_clevel. 96 */ 97 98 /* 99 * exported vars 100 */ 101 kmutex_t dump_lock; /* lock for dump configuration */ 102 dumphdr_t *dumphdr; /* dump header */ 103 int dump_conflags = DUMP_KERNEL; /* dump configuration flags */ 104 vnode_t *dumpvp; /* dump device vnode pointer */ 105 u_offset_t dumpvp_size; /* size of dump device, in bytes */ 106 char *dumppath; /* pathname of dump device */ 107 int dump_timeout = 120; /* timeout for dumping pages */ 108 int dump_timeleft; /* portion of dump_timeout remaining */ 109 int dump_ioerr; /* dump i/o error */ 110 int dump_check_used; /* enable check for used pages */ 111 char *dump_stack_scratch; /* scratch area for saving stack summary */ 112 113 /* 114 * Tunables for dump compression and parallelism. These can be set via 115 * /etc/system. 116 * 117 * dump_ncpu_low number of helpers for parallel lzjb 118 * This is also the minimum configuration. 119 * 120 * dump_bzip2_level bzip2 compression level: 1-9 121 * Higher numbers give greater compression, but take more memory 122 * and time. Memory used per helper is ~(dump_bzip2_level * 1MB). 123 * 124 * dump_plat_mincpu the cross-over limit for using bzip2 (per platform): 125 * if dump_plat_mincpu == 0, then always do single threaded dump 126 * if ncpu >= dump_plat_mincpu then try to use bzip2 127 * 128 * dump_metrics_on if set, metrics are collected in the kernel, passed 129 * to savecore via the dump file, and recorded by savecore in 130 * METRICS.txt. 131 */ 132 uint_t dump_ncpu_low = 4; /* minimum config for parallel lzjb */ 133 uint_t dump_bzip2_level = 1; /* bzip2 level (1-9) */ 134 135 /* Use dump_plat_mincpu_default unless this variable is set by /etc/system */ 136 #define MINCPU_NOT_SET ((uint_t)-1) 137 uint_t dump_plat_mincpu = MINCPU_NOT_SET; 138 139 /* tunables for pre-reserved heap */ 140 uint_t dump_kmem_permap = 1024; 141 uint_t dump_kmem_pages = 8; 142 143 /* Define multiple buffers per helper to avoid stalling */ 144 #define NCBUF_PER_HELPER 2 145 #define NCMAP_PER_HELPER 4 146 147 /* minimum number of helpers configured */ 148 #define MINHELPERS (dump_ncpu_low) 149 #define MINCBUFS (MINHELPERS * NCBUF_PER_HELPER) 150 151 /* 152 * Define constant parameters. 153 * 154 * CBUF_SIZE size of an output buffer 155 * 156 * CBUF_MAPSIZE size of virtual range for mapping pages 157 * 158 * CBUF_MAPNP size of virtual range in pages 159 * 160 */ 161 #define DUMP_1KB ((size_t)1 << 10) 162 #define DUMP_1MB ((size_t)1 << 20) 163 #define CBUF_SIZE ((size_t)1 << 17) 164 #define CBUF_MAPSHIFT (22) 165 #define CBUF_MAPSIZE ((size_t)1 << CBUF_MAPSHIFT) 166 #define CBUF_MAPNP ((size_t)1 << (CBUF_MAPSHIFT - PAGESHIFT)) 167 168 /* 169 * Compression metrics are accumulated nano-second subtotals. The 170 * results are normalized by the number of pages dumped. A report is 171 * generated when dumpsys() completes and is saved in the dump image 172 * after the trailing dump header. 173 * 174 * Metrics are always collected. Set the variable dump_metrics_on to 175 * cause metrics to be saved in the crash file, where savecore will 176 * save it in the file METRICS.txt. 177 */ 178 #define PERPAGES \ 179 PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \ 180 PERPAGE(copy) PERPAGE(compress) \ 181 PERPAGE(write) \ 182 PERPAGE(inwait) PERPAGE(outwait) 183 184 typedef struct perpage { 185 #define PERPAGE(x) hrtime_t x; 186 PERPAGES 187 #undef PERPAGE 188 } perpage_t; 189 190 /* 191 * This macro controls the code generation for collecting dump 192 * performance information. By default, the code is generated, but 193 * automatic saving of the information is disabled. If dump_metrics_on 194 * is set to 1, the timing information is passed to savecore via the 195 * crash file, where it is appended to the file dump-dir/METRICS.txt. 196 */ 197 #define COLLECT_METRICS 198 199 #ifdef COLLECT_METRICS 200 uint_t dump_metrics_on = 0; /* set to 1 to enable recording metrics */ 201 202 #define HRSTART(v, m) v##ts.m = gethrtime() 203 #define HRSTOP(v, m) v.m += gethrtime() - v##ts.m 204 #define HRBEGIN(v, m, s) v##ts.m = gethrtime(); v.size += s 205 #define HREND(v, m) v.m += gethrtime() - v##ts.m 206 #define HRNORM(v, m, n) v.m /= (n) 207 208 #else 209 #define HRSTART(v, m) 210 #define HRSTOP(v, m) 211 #define HRBEGIN(v, m, s) 212 #define HREND(v, m) 213 #define HRNORM(v, m, n) 214 #endif /* COLLECT_METRICS */ 215 216 /* 217 * Buffers for copying and compressing memory pages. 218 * 219 * cbuf_t buffer controllers: used for both input and output. 220 * 221 * The buffer state indicates how it is being used: 222 * 223 * CBUF_FREEMAP: CBUF_MAPSIZE virtual address range is available for 224 * mapping input pages. 225 * 226 * CBUF_INREADY: input pages are mapped and ready for compression by a 227 * helper. 228 * 229 * CBUF_USEDMAP: mapping has been consumed by a helper. Needs unmap. 230 * 231 * CBUF_FREEBUF: CBUF_SIZE output buffer, which is available. 232 * 233 * CBUF_WRITE: CBUF_SIZE block of compressed pages from a helper, 234 * ready to write out. 235 * 236 * CBUF_ERRMSG: CBUF_SIZE block of error messages from a helper 237 * (reports UE errors.) 238 */ 239 240 typedef enum cbufstate { 241 CBUF_FREEMAP, 242 CBUF_INREADY, 243 CBUF_USEDMAP, 244 CBUF_FREEBUF, 245 CBUF_WRITE, 246 CBUF_ERRMSG 247 } cbufstate_t; 248 249 typedef struct cbuf cbuf_t; 250 251 struct cbuf { 252 cbuf_t *next; /* next in list */ 253 cbufstate_t state; /* processing state */ 254 size_t used; /* amount used */ 255 size_t size; /* mem size */ 256 char *buf; /* kmem or vmem */ 257 pgcnt_t pagenum; /* index to pfn map */ 258 pgcnt_t bitnum; /* first set bitnum */ 259 pfn_t pfn; /* first pfn in mapped range */ 260 int off; /* byte offset to first pfn */ 261 }; 262 263 static char dump_osimage_uuid[36 + 1]; 264 265 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9') 266 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \ 267 ((ch) >= 'A' && (ch) <= 'F')) 268 269 /* 270 * cqueue_t queues: a uni-directional channel for communication 271 * from the master to helper tasks or vice-versa using put and 272 * get primitives. Both mappings and data buffers are passed via 273 * queues. Producers close a queue when done. The number of 274 * active producers is reference counted so the consumer can 275 * detect end of data. Concurrent access is mediated by atomic 276 * operations for panic dump, or mutex/cv for live dump. 277 * 278 * There a four queues, used as follows: 279 * 280 * Queue Dataflow NewState 281 * -------------------------------------------------- 282 * mainq master -> master FREEMAP 283 * master has initialized or unmapped an input buffer 284 * -------------------------------------------------- 285 * helperq master -> helper INREADY 286 * master has mapped input for use by helper 287 * -------------------------------------------------- 288 * mainq master <- helper USEDMAP 289 * helper is done with input 290 * -------------------------------------------------- 291 * freebufq master -> helper FREEBUF 292 * master has initialized or written an output buffer 293 * -------------------------------------------------- 294 * mainq master <- helper WRITE 295 * block of compressed pages from a helper 296 * -------------------------------------------------- 297 * mainq master <- helper ERRMSG 298 * error messages from a helper (memory error case) 299 * -------------------------------------------------- 300 * writerq master <- master WRITE 301 * non-blocking queue of blocks to write 302 * -------------------------------------------------- 303 */ 304 typedef struct cqueue { 305 cbuf_t *volatile first; /* first in list */ 306 cbuf_t *last; /* last in list */ 307 hrtime_t ts; /* timestamp */ 308 hrtime_t empty; /* total time empty */ 309 kmutex_t mutex; /* live state lock */ 310 kcondvar_t cv; /* live wait var */ 311 lock_t spinlock; /* panic mode spin lock */ 312 volatile uint_t open; /* producer ref count */ 313 } cqueue_t; 314 315 /* 316 * Convenience macros for using the cqueue functions 317 * Note that the caller must have defined "dumpsync_t *ds" 318 */ 319 #define CQ_IS_EMPTY(q) \ 320 (ds->q.first == NULL) 321 322 #define CQ_OPEN(q) \ 323 atomic_inc_uint(&ds->q.open) 324 325 #define CQ_CLOSE(q) \ 326 dumpsys_close_cq(&ds->q, ds->live) 327 328 #define CQ_PUT(q, cp, st) \ 329 dumpsys_put_cq(&ds->q, cp, st, ds->live) 330 331 #define CQ_GET(q) \ 332 dumpsys_get_cq(&ds->q, ds->live) 333 334 /* 335 * Dynamic state when dumpsys() is running. 336 */ 337 typedef struct dumpsync { 338 pgcnt_t npages; /* subtotal of pages dumped */ 339 pgcnt_t pages_mapped; /* subtotal of pages mapped */ 340 pgcnt_t pages_used; /* subtotal of pages used per map */ 341 size_t nwrite; /* subtotal of bytes written */ 342 uint_t live; /* running live dump */ 343 uint_t neednl; /* will need to print a newline */ 344 uint_t percent; /* dump progress */ 345 uint_t percent_done; /* dump progress reported */ 346 int sec_done; /* dump progress last report time */ 347 cqueue_t freebufq; /* free kmem bufs for writing */ 348 cqueue_t mainq; /* input for main task */ 349 cqueue_t helperq; /* input for helpers */ 350 cqueue_t writerq; /* input for writer */ 351 hrtime_t start; /* start time */ 352 hrtime_t elapsed; /* elapsed time when completed */ 353 hrtime_t iotime; /* time spent writing nwrite bytes */ 354 hrtime_t iowait; /* time spent waiting for output */ 355 hrtime_t iowaitts; /* iowait timestamp */ 356 perpage_t perpage; /* metrics */ 357 perpage_t perpagets; 358 int dumpcpu; /* master cpu */ 359 } dumpsync_t; 360 361 static dumpsync_t dumpsync; /* synchronization vars */ 362 363 /* 364 * helper_t helpers: contains the context for a stream. CPUs run in 365 * parallel at dump time; each CPU creates a single stream of 366 * compression data. Stream data is divided into CBUF_SIZE blocks. 367 * The blocks are written in order within a stream. But, blocks from 368 * multiple streams can be interleaved. Each stream is identified by a 369 * unique tag. 370 */ 371 typedef struct helper { 372 int helper; /* bound helper id */ 373 int tag; /* compression stream tag */ 374 perpage_t perpage; /* per page metrics */ 375 perpage_t perpagets; /* per page metrics (timestamps) */ 376 taskqid_t taskqid; /* live dump task ptr */ 377 int in, out; /* buffer offsets */ 378 cbuf_t *cpin, *cpout, *cperr; /* cbuf objects in process */ 379 dumpsync_t *ds; /* pointer to sync vars */ 380 size_t used; /* counts input consumed */ 381 char *page; /* buffer for page copy */ 382 char *lzbuf; /* lzjb output */ 383 bz_stream bzstream; /* bzip2 state */ 384 } helper_t; 385 386 #define MAINHELPER (-1) /* helper is also the main task */ 387 #define FREEHELPER (-2) /* unbound helper */ 388 #define DONEHELPER (-3) /* helper finished */ 389 390 /* 391 * configuration vars for dumpsys 392 */ 393 typedef struct dumpcfg { 394 int threshold; /* ncpu threshold for bzip2 */ 395 int nhelper; /* number of helpers */ 396 int nhelper_used; /* actual number of helpers used */ 397 int ncmap; /* number VA pages for compression */ 398 int ncbuf; /* number of bufs for compression */ 399 int ncbuf_used; /* number of bufs in use */ 400 uint_t clevel; /* dump compression level */ 401 helper_t *helper; /* array of helpers */ 402 cbuf_t *cmap; /* array of input (map) buffers */ 403 cbuf_t *cbuf; /* array of output buffers */ 404 ulong_t *helpermap; /* set of dumpsys helper CPU ids */ 405 ulong_t *bitmap; /* bitmap for marking pages to dump */ 406 ulong_t *rbitmap; /* bitmap for used CBUF_MAPSIZE ranges */ 407 pgcnt_t bitmapsize; /* size of bitmap */ 408 pgcnt_t rbitmapsize; /* size of bitmap for ranges */ 409 pgcnt_t found4m; /* number ranges allocated by dump */ 410 pgcnt_t foundsm; /* number small pages allocated by dump */ 411 pid_t *pids; /* list of process IDs at dump time */ 412 size_t maxsize; /* memory size needed at dump time */ 413 size_t maxvmsize; /* size of reserved VM */ 414 char *maxvm; /* reserved VM for spare pages */ 415 lock_t helper_lock; /* protect helper state */ 416 char helpers_wanted; /* flag to enable parallelism */ 417 } dumpcfg_t; 418 419 static dumpcfg_t dumpcfg; /* config vars */ 420 421 /* 422 * The dump I/O buffer. 423 * 424 * There is one I/O buffer used by dumpvp_write and dumvp_flush. It is 425 * sized according to the optimum device transfer speed. 426 */ 427 typedef struct dumpbuf { 428 vnode_t *cdev_vp; /* VCHR open of the dump device */ 429 len_t vp_limit; /* maximum write offset */ 430 offset_t vp_off; /* current dump device offset */ 431 char *cur; /* dump write pointer */ 432 char *start; /* dump buffer address */ 433 char *end; /* dump buffer end */ 434 size_t size; /* size of dumpbuf in bytes */ 435 size_t iosize; /* best transfer size for device */ 436 } dumpbuf_t; 437 438 dumpbuf_t dumpbuf; /* I/O buffer */ 439 440 /* 441 * The dump I/O buffer must be at least one page, at most xfer_size 442 * bytes, and should scale with physmem in between. The transfer size 443 * passed in will either represent a global default (maxphys) or the 444 * best size for the device. The size of the dumpbuf I/O buffer is 445 * limited by dumpbuf_limit (8MB by default) because the dump 446 * performance saturates beyond a certain size. The default is to 447 * select 1/4096 of the memory. 448 */ 449 static int dumpbuf_fraction = 12; /* memory size scale factor */ 450 static size_t dumpbuf_limit = 8 * DUMP_1MB; /* max I/O buf size */ 451 452 static size_t 453 dumpbuf_iosize(size_t xfer_size) 454 { 455 size_t iosize = ptob(physmem >> dumpbuf_fraction); 456 457 if (iosize < PAGESIZE) 458 iosize = PAGESIZE; 459 else if (iosize > xfer_size) 460 iosize = xfer_size; 461 if (iosize > dumpbuf_limit) 462 iosize = dumpbuf_limit; 463 return (iosize & PAGEMASK); 464 } 465 466 /* 467 * resize the I/O buffer 468 */ 469 static void 470 dumpbuf_resize(void) 471 { 472 char *old_buf = dumpbuf.start; 473 size_t old_size = dumpbuf.size; 474 char *new_buf; 475 size_t new_size; 476 477 ASSERT(MUTEX_HELD(&dump_lock)); 478 479 new_size = dumpbuf_iosize(MAX(dumpbuf.iosize, maxphys)); 480 if (new_size <= old_size) 481 return; /* no need to reallocate buffer */ 482 483 new_buf = kmem_alloc(new_size, KM_SLEEP); 484 dumpbuf.size = new_size; 485 dumpbuf.start = new_buf; 486 dumpbuf.end = new_buf + new_size; 487 kmem_free(old_buf, old_size); 488 } 489 490 /* 491 * dump_update_clevel is called when dumpadm configures the dump device. 492 * Calculate number of helpers and buffers. 493 * Allocate the minimum configuration for now. 494 * 495 * When the dump file is configured we reserve a minimum amount of 496 * memory for use at crash time. But we reserve VA for all the memory 497 * we really want in order to do the fastest dump possible. The VA is 498 * backed by pages not being dumped, according to the bitmap. If 499 * there is insufficient spare memory, however, we fall back to the 500 * minimum. 501 * 502 * Live dump (savecore -L) always uses the minimum config. 503 * 504 * clevel 0 is single threaded lzjb 505 * clevel 1 is parallel lzjb 506 * clevel 2 is parallel bzip2 507 * 508 * The ncpu threshold is selected with dump_plat_mincpu. 509 * On OPL, set_platform_defaults() overrides the sun4u setting. 510 * The actual values are defined via DUMP_PLAT_*_MINCPU macros. 511 * 512 * Architecture Threshold Algorithm 513 * sun4u < 51 parallel lzjb 514 * sun4u >= 51 parallel bzip2(*) 515 * sun4u OPL < 8 parallel lzjb 516 * sun4u OPL >= 8 parallel bzip2(*) 517 * sun4v < 128 parallel lzjb 518 * sun4v >= 128 parallel bzip2(*) 519 * x86 < 11 parallel lzjb 520 * x86 >= 11 parallel bzip2(*) 521 * 32-bit N/A single-threaded lzjb 522 * 523 * (*) bzip2 is only chosen if there is sufficient available 524 * memory for buffers at dump time. See dumpsys_get_maxmem(). 525 * 526 * Faster dump devices have larger I/O buffers. The threshold value is 527 * increased according to the size of the dump I/O buffer, because 528 * parallel lzjb performs better with faster disks. For buffers >= 1MB 529 * the threshold is 3X; for buffers >= 256K threshold is 2X. 530 * 531 * For parallel dumps, the number of helpers is ncpu-1. The CPU 532 * running panic runs the main task. For single-threaded dumps, the 533 * panic CPU does lzjb compression (it is tagged as MAINHELPER.) 534 * 535 * Need multiple buffers per helper so that they do not block waiting 536 * for the main task. 537 * parallel single-threaded 538 * Number of output buffers: nhelper*2 1 539 * Number of mapping buffers: nhelper*4 1 540 * 541 */ 542 static void 543 dump_update_clevel() 544 { 545 int tag; 546 size_t bz2size; 547 helper_t *hp, *hpend; 548 cbuf_t *cp, *cpend; 549 dumpcfg_t *old = &dumpcfg; 550 dumpcfg_t newcfg = *old; 551 dumpcfg_t *new = &newcfg; 552 553 ASSERT(MUTEX_HELD(&dump_lock)); 554 555 /* 556 * Free the previously allocated bufs and VM. 557 */ 558 if (old->helper != NULL) { 559 560 /* helpers */ 561 hpend = &old->helper[old->nhelper]; 562 for (hp = old->helper; hp != hpend; hp++) { 563 if (hp->lzbuf != NULL) 564 kmem_free(hp->lzbuf, PAGESIZE); 565 if (hp->page != NULL) 566 kmem_free(hp->page, PAGESIZE); 567 } 568 kmem_free(old->helper, old->nhelper * sizeof (helper_t)); 569 570 /* VM space for mapping pages */ 571 cpend = &old->cmap[old->ncmap]; 572 for (cp = old->cmap; cp != cpend; cp++) 573 vmem_xfree(heap_arena, cp->buf, CBUF_MAPSIZE); 574 kmem_free(old->cmap, old->ncmap * sizeof (cbuf_t)); 575 576 /* output bufs */ 577 cpend = &old->cbuf[old->ncbuf]; 578 for (cp = old->cbuf; cp != cpend; cp++) 579 if (cp->buf != NULL) 580 kmem_free(cp->buf, cp->size); 581 kmem_free(old->cbuf, old->ncbuf * sizeof (cbuf_t)); 582 583 /* reserved VM for dumpsys_get_maxmem */ 584 if (old->maxvmsize > 0) 585 vmem_xfree(heap_arena, old->maxvm, old->maxvmsize); 586 } 587 588 /* 589 * Allocate memory and VM. 590 * One CPU runs dumpsys, the rest are helpers. 591 */ 592 new->nhelper = ncpus - 1; 593 if (new->nhelper < 1) 594 new->nhelper = 1; 595 596 if (new->nhelper > DUMP_MAX_NHELPER) 597 new->nhelper = DUMP_MAX_NHELPER; 598 599 /* use platform default, unless /etc/system overrides */ 600 if (dump_plat_mincpu == MINCPU_NOT_SET) 601 dump_plat_mincpu = dump_plat_mincpu_default; 602 603 /* increase threshold for faster disks */ 604 new->threshold = dump_plat_mincpu; 605 if (dumpbuf.iosize >= DUMP_1MB) 606 new->threshold *= 3; 607 else if (dumpbuf.iosize >= (256 * DUMP_1KB)) 608 new->threshold *= 2; 609 610 /* figure compression level based upon the computed threshold. */ 611 if (dump_plat_mincpu == 0 || new->nhelper < 2) { 612 new->clevel = 0; 613 new->nhelper = 1; 614 } else if ((new->nhelper + 1) >= new->threshold) { 615 new->clevel = DUMP_CLEVEL_BZIP2; 616 } else { 617 new->clevel = DUMP_CLEVEL_LZJB; 618 } 619 620 if (new->clevel == 0) { 621 new->ncbuf = 1; 622 new->ncmap = 1; 623 } else { 624 new->ncbuf = NCBUF_PER_HELPER * new->nhelper; 625 new->ncmap = NCMAP_PER_HELPER * new->nhelper; 626 } 627 628 /* 629 * Allocate new data structures and buffers for MINHELPERS, 630 * and also figure the max desired size. 631 */ 632 bz2size = BZ2_bzCompressInitSize(dump_bzip2_level); 633 new->maxsize = 0; 634 new->maxvmsize = 0; 635 new->maxvm = NULL; 636 tag = 1; 637 new->helper = kmem_zalloc(new->nhelper * sizeof (helper_t), KM_SLEEP); 638 hpend = &new->helper[new->nhelper]; 639 for (hp = new->helper; hp != hpend; hp++) { 640 hp->tag = tag++; 641 if (hp < &new->helper[MINHELPERS]) { 642 hp->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP); 643 hp->page = kmem_alloc(PAGESIZE, KM_SLEEP); 644 } else if (new->clevel < DUMP_CLEVEL_BZIP2) { 645 new->maxsize += 2 * PAGESIZE; 646 } else { 647 new->maxsize += PAGESIZE; 648 } 649 if (new->clevel >= DUMP_CLEVEL_BZIP2) 650 new->maxsize += bz2size; 651 } 652 653 new->cbuf = kmem_zalloc(new->ncbuf * sizeof (cbuf_t), KM_SLEEP); 654 cpend = &new->cbuf[new->ncbuf]; 655 for (cp = new->cbuf; cp != cpend; cp++) { 656 cp->state = CBUF_FREEBUF; 657 cp->size = CBUF_SIZE; 658 if (cp < &new->cbuf[MINCBUFS]) 659 cp->buf = kmem_alloc(cp->size, KM_SLEEP); 660 else 661 new->maxsize += cp->size; 662 } 663 664 new->cmap = kmem_zalloc(new->ncmap * sizeof (cbuf_t), KM_SLEEP); 665 cpend = &new->cmap[new->ncmap]; 666 for (cp = new->cmap; cp != cpend; cp++) { 667 cp->state = CBUF_FREEMAP; 668 cp->size = CBUF_MAPSIZE; 669 cp->buf = vmem_xalloc(heap_arena, CBUF_MAPSIZE, CBUF_MAPSIZE, 670 0, 0, NULL, NULL, VM_SLEEP); 671 } 672 673 /* reserve VA to be backed with spare pages at crash time */ 674 if (new->maxsize > 0) { 675 new->maxsize = P2ROUNDUP(new->maxsize, PAGESIZE); 676 new->maxvmsize = P2ROUNDUP(new->maxsize, CBUF_MAPSIZE); 677 new->maxvm = vmem_xalloc(heap_arena, new->maxvmsize, 678 CBUF_MAPSIZE, 0, 0, NULL, NULL, VM_SLEEP); 679 } 680 681 /* 682 * Reserve memory for kmem allocation calls made during crash 683 * dump. The hat layer allocates memory for each mapping 684 * created, and the I/O path allocates buffers and data structs. 685 * Add a few pages for safety. 686 */ 687 kmem_dump_init((new->ncmap * dump_kmem_permap) + 688 (dump_kmem_pages * PAGESIZE)); 689 690 /* set new config pointers */ 691 *old = *new; 692 } 693 694 /* 695 * Define a struct memlist walker to optimize bitnum to pfn 696 * lookup. The walker maintains the state of the list traversal. 697 */ 698 typedef struct dumpmlw { 699 struct memlist *mp; /* current memlist */ 700 pgcnt_t basenum; /* bitnum base offset */ 701 pgcnt_t mppages; /* current memlist size */ 702 pgcnt_t mpleft; /* size to end of current memlist */ 703 pfn_t mpaddr; /* first pfn in memlist */ 704 } dumpmlw_t; 705 706 /* initialize the walker */ 707 static inline void 708 dump_init_memlist_walker(dumpmlw_t *pw) 709 { 710 pw->mp = phys_install; 711 pw->basenum = 0; 712 pw->mppages = pw->mp->ml_size >> PAGESHIFT; 713 pw->mpleft = pw->mppages; 714 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT; 715 } 716 717 /* 718 * Lookup pfn given bitnum. The memlist can be quite long on some 719 * systems (e.g.: one per board). To optimize sequential lookups, the 720 * caller initializes and presents a memlist walker. 721 */ 722 static pfn_t 723 dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw) 724 { 725 bitnum -= pw->basenum; 726 while (pw->mp != NULL) { 727 if (bitnum < pw->mppages) { 728 pw->mpleft = pw->mppages - bitnum; 729 return (pw->mpaddr + bitnum); 730 } 731 bitnum -= pw->mppages; 732 pw->basenum += pw->mppages; 733 pw->mp = pw->mp->ml_next; 734 if (pw->mp != NULL) { 735 pw->mppages = pw->mp->ml_size >> PAGESHIFT; 736 pw->mpleft = pw->mppages; 737 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT; 738 } 739 } 740 return (PFN_INVALID); 741 } 742 743 static pgcnt_t 744 dump_pfn_to_bitnum(pfn_t pfn) 745 { 746 struct memlist *mp; 747 pgcnt_t bitnum = 0; 748 749 for (mp = phys_install; mp != NULL; mp = mp->ml_next) { 750 if (pfn >= (mp->ml_address >> PAGESHIFT) && 751 pfn < ((mp->ml_address + mp->ml_size) >> PAGESHIFT)) 752 return (bitnum + pfn - (mp->ml_address >> PAGESHIFT)); 753 bitnum += mp->ml_size >> PAGESHIFT; 754 } 755 return ((pgcnt_t)-1); 756 } 757 758 /* 759 * Set/test bitmap for a CBUF_MAPSIZE range which includes pfn. The 760 * mapping of pfn to range index is imperfect because pfn and bitnum 761 * do not have the same phase. To make sure a CBUF_MAPSIZE range is 762 * covered, call this for both ends: 763 * dump_set_used(base) 764 * dump_set_used(base+CBUF_MAPNP-1) 765 * 766 * This is used during a panic dump to mark pages allocated by 767 * dumpsys_get_maxmem(). The macro IS_DUMP_PAGE(pp) is used by 768 * page_get_mnode_freelist() to make sure pages used by dump are never 769 * allocated. 770 */ 771 #define CBUF_MAPP2R(pfn) ((pfn) >> (CBUF_MAPSHIFT - PAGESHIFT)) 772 773 static void 774 dump_set_used(pfn_t pfn) 775 { 776 777 pgcnt_t bitnum, rbitnum; 778 779 bitnum = dump_pfn_to_bitnum(pfn); 780 ASSERT(bitnum != (pgcnt_t)-1); 781 782 rbitnum = CBUF_MAPP2R(bitnum); 783 ASSERT(rbitnum < dumpcfg.rbitmapsize); 784 785 BT_SET(dumpcfg.rbitmap, rbitnum); 786 } 787 788 int 789 dump_test_used(pfn_t pfn) 790 { 791 pgcnt_t bitnum, rbitnum; 792 793 bitnum = dump_pfn_to_bitnum(pfn); 794 ASSERT(bitnum != (pgcnt_t)-1); 795 796 rbitnum = CBUF_MAPP2R(bitnum); 797 ASSERT(rbitnum < dumpcfg.rbitmapsize); 798 799 return (BT_TEST(dumpcfg.rbitmap, rbitnum)); 800 } 801 802 /* 803 * dumpbzalloc and dumpbzfree are callbacks from the bzip2 library. 804 * dumpsys_get_maxmem() uses them for BZ2_bzCompressInit(). 805 */ 806 static void * 807 dumpbzalloc(void *opaque, int items, int size) 808 { 809 size_t *sz; 810 char *ret; 811 812 ASSERT(opaque != NULL); 813 sz = opaque; 814 ret = dumpcfg.maxvm + *sz; 815 *sz += items * size; 816 *sz = P2ROUNDUP(*sz, BZ2_BZALLOC_ALIGN); 817 ASSERT(*sz <= dumpcfg.maxvmsize); 818 return (ret); 819 } 820 821 /*ARGSUSED*/ 822 static void 823 dumpbzfree(void *opaque, void *addr) 824 { 825 } 826 827 /* 828 * Perform additional checks on the page to see if we can really use 829 * it. The kernel (kas) pages are always set in the bitmap. However, 830 * boot memory pages (prom_ppages or P_BOOTPAGES) are not in the 831 * bitmap. So we check for them. 832 */ 833 static inline int 834 dump_pfn_check(pfn_t pfn) 835 { 836 page_t *pp = page_numtopp_nolock(pfn); 837 if (pp == NULL || pp->p_pagenum != pfn || 838 #if defined(__sparc) 839 pp->p_vnode == &promvp || 840 #else 841 PP_ISBOOTPAGES(pp) || 842 #endif 843 pp->p_toxic != 0) 844 return (0); 845 return (1); 846 } 847 848 /* 849 * Check a range to see if all contained pages are available and 850 * return non-zero if the range can be used. 851 */ 852 static inline int 853 dump_range_check(pgcnt_t start, pgcnt_t end, pfn_t pfn) 854 { 855 for (; start < end; start++, pfn++) { 856 if (BT_TEST(dumpcfg.bitmap, start)) 857 return (0); 858 if (!dump_pfn_check(pfn)) 859 return (0); 860 } 861 return (1); 862 } 863 864 /* 865 * dumpsys_get_maxmem() is called during panic. Find unused ranges 866 * and use them for buffers. If we find enough memory switch to 867 * parallel bzip2, otherwise use parallel lzjb. 868 * 869 * It searches the dump bitmap in 2 passes. The first time it looks 870 * for CBUF_MAPSIZE ranges. On the second pass it uses small pages. 871 */ 872 static void 873 dumpsys_get_maxmem() 874 { 875 dumpcfg_t *cfg = &dumpcfg; 876 cbuf_t *endcp = &cfg->cbuf[cfg->ncbuf]; 877 helper_t *endhp = &cfg->helper[cfg->nhelper]; 878 pgcnt_t bitnum, end; 879 size_t sz, endsz, bz2size; 880 pfn_t pfn, off; 881 cbuf_t *cp; 882 helper_t *hp, *ohp; 883 dumpmlw_t mlw; 884 int k; 885 886 /* 887 * Setting dump_plat_mincpu to 0 at any time forces a serial 888 * dump. 889 */ 890 if (dump_plat_mincpu == 0) { 891 cfg->clevel = 0; 892 return; 893 } 894 895 /* 896 * There may be no point in looking for spare memory. If 897 * dumping all memory, then none is spare. If doing a serial 898 * dump, then already have buffers. 899 */ 900 if (cfg->maxsize == 0 || cfg->clevel < DUMP_CLEVEL_LZJB || 901 (dump_conflags & DUMP_ALL) != 0) { 902 if (cfg->clevel > DUMP_CLEVEL_LZJB) 903 cfg->clevel = DUMP_CLEVEL_LZJB; 904 return; 905 } 906 907 sz = 0; 908 cfg->found4m = 0; 909 cfg->foundsm = 0; 910 911 /* bitmap of ranges used to estimate which pfns are being used */ 912 bzero(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.rbitmapsize)); 913 914 /* find ranges that are not being dumped to use for buffers */ 915 dump_init_memlist_walker(&mlw); 916 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) { 917 dump_timeleft = dump_timeout; 918 end = bitnum + CBUF_MAPNP; 919 pfn = dump_bitnum_to_pfn(bitnum, &mlw); 920 ASSERT(pfn != PFN_INVALID); 921 922 /* skip partial range at end of mem segment */ 923 if (mlw.mpleft < CBUF_MAPNP) { 924 end = bitnum + mlw.mpleft; 925 continue; 926 } 927 928 /* skip non aligned pages */ 929 off = P2PHASE(pfn, CBUF_MAPNP); 930 if (off != 0) { 931 end -= off; 932 continue; 933 } 934 935 if (!dump_range_check(bitnum, end, pfn)) 936 continue; 937 938 ASSERT((sz + CBUF_MAPSIZE) <= cfg->maxvmsize); 939 hat_devload(kas.a_hat, cfg->maxvm + sz, CBUF_MAPSIZE, pfn, 940 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST); 941 sz += CBUF_MAPSIZE; 942 cfg->found4m++; 943 944 /* set the bitmap for both ends to be sure to cover the range */ 945 dump_set_used(pfn); 946 dump_set_used(pfn + CBUF_MAPNP - 1); 947 948 if (sz >= cfg->maxsize) 949 goto foundmax; 950 } 951 952 /* Add small pages if we can't find enough large pages. */ 953 dump_init_memlist_walker(&mlw); 954 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) { 955 dump_timeleft = dump_timeout; 956 end = bitnum + CBUF_MAPNP; 957 pfn = dump_bitnum_to_pfn(bitnum, &mlw); 958 ASSERT(pfn != PFN_INVALID); 959 960 /* Find any non-aligned pages at start and end of segment. */ 961 off = P2PHASE(pfn, CBUF_MAPNP); 962 if (mlw.mpleft < CBUF_MAPNP) { 963 end = bitnum + mlw.mpleft; 964 } else if (off != 0) { 965 end -= off; 966 } else if (cfg->found4m && dump_test_used(pfn)) { 967 continue; 968 } 969 970 for (; bitnum < end; bitnum++, pfn++) { 971 dump_timeleft = dump_timeout; 972 if (BT_TEST(dumpcfg.bitmap, bitnum)) 973 continue; 974 if (!dump_pfn_check(pfn)) 975 continue; 976 ASSERT((sz + PAGESIZE) <= cfg->maxvmsize); 977 hat_devload(kas.a_hat, cfg->maxvm + sz, PAGESIZE, pfn, 978 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST); 979 sz += PAGESIZE; 980 cfg->foundsm++; 981 dump_set_used(pfn); 982 if (sz >= cfg->maxsize) 983 goto foundmax; 984 } 985 } 986 987 /* Fall back to lzjb if we did not get enough memory for bzip2. */ 988 endsz = (cfg->maxsize * cfg->threshold) / cfg->nhelper; 989 if (sz < endsz) { 990 cfg->clevel = DUMP_CLEVEL_LZJB; 991 } 992 993 /* Allocate memory for as many helpers as we can. */ 994 foundmax: 995 996 /* Byte offsets into memory found and mapped above */ 997 endsz = sz; 998 sz = 0; 999 1000 /* Set the size for bzip2 state. Only bzip2 needs it. */ 1001 bz2size = BZ2_bzCompressInitSize(dump_bzip2_level); 1002 1003 /* Skip the preallocate output buffers. */ 1004 cp = &cfg->cbuf[MINCBUFS]; 1005 1006 /* Use this to move memory up from the preallocated helpers. */ 1007 ohp = cfg->helper; 1008 1009 /* Loop over all helpers and allocate memory. */ 1010 for (hp = cfg->helper; hp < endhp; hp++) { 1011 1012 /* Skip preallocated helpers by checking hp->page. */ 1013 if (hp->page == NULL) { 1014 if (cfg->clevel <= DUMP_CLEVEL_LZJB) { 1015 /* lzjb needs 2 1-page buffers */ 1016 if ((sz + (2 * PAGESIZE)) > endsz) 1017 break; 1018 hp->page = cfg->maxvm + sz; 1019 sz += PAGESIZE; 1020 hp->lzbuf = cfg->maxvm + sz; 1021 sz += PAGESIZE; 1022 1023 } else if (ohp->lzbuf != NULL) { 1024 /* re-use the preallocted lzjb page for bzip2 */ 1025 hp->page = ohp->lzbuf; 1026 ohp->lzbuf = NULL; 1027 ++ohp; 1028 1029 } else { 1030 /* bzip2 needs a 1-page buffer */ 1031 if ((sz + PAGESIZE) > endsz) 1032 break; 1033 hp->page = cfg->maxvm + sz; 1034 sz += PAGESIZE; 1035 } 1036 } 1037 1038 /* 1039 * Add output buffers per helper. The number of 1040 * buffers per helper is determined by the ratio of 1041 * ncbuf to nhelper. 1042 */ 1043 for (k = 0; cp < endcp && (sz + CBUF_SIZE) <= endsz && 1044 k < NCBUF_PER_HELPER; k++) { 1045 cp->state = CBUF_FREEBUF; 1046 cp->size = CBUF_SIZE; 1047 cp->buf = cfg->maxvm + sz; 1048 sz += CBUF_SIZE; 1049 ++cp; 1050 } 1051 1052 /* 1053 * bzip2 needs compression state. Use the dumpbzalloc 1054 * and dumpbzfree callbacks to allocate the memory. 1055 * bzip2 does allocation only at init time. 1056 */ 1057 if (cfg->clevel >= DUMP_CLEVEL_BZIP2) { 1058 if ((sz + bz2size) > endsz) { 1059 hp->page = NULL; 1060 break; 1061 } else { 1062 hp->bzstream.opaque = &sz; 1063 hp->bzstream.bzalloc = dumpbzalloc; 1064 hp->bzstream.bzfree = dumpbzfree; 1065 (void) BZ2_bzCompressInit(&hp->bzstream, 1066 dump_bzip2_level, 0, 0); 1067 hp->bzstream.opaque = NULL; 1068 } 1069 } 1070 } 1071 1072 /* Finish allocating output buffers */ 1073 for (; cp < endcp && (sz + CBUF_SIZE) <= endsz; cp++) { 1074 cp->state = CBUF_FREEBUF; 1075 cp->size = CBUF_SIZE; 1076 cp->buf = cfg->maxvm + sz; 1077 sz += CBUF_SIZE; 1078 } 1079 1080 /* Enable IS_DUMP_PAGE macro, which checks for pages we took. */ 1081 if (cfg->found4m || cfg->foundsm) 1082 dump_check_used = 1; 1083 1084 ASSERT(sz <= endsz); 1085 } 1086 1087 static void 1088 dumphdr_init(void) 1089 { 1090 pgcnt_t npages = 0; 1091 1092 ASSERT(MUTEX_HELD(&dump_lock)); 1093 1094 if (dumphdr == NULL) { 1095 dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP); 1096 dumphdr->dump_magic = DUMP_MAGIC; 1097 dumphdr->dump_version = DUMP_VERSION; 1098 dumphdr->dump_wordsize = DUMP_WORDSIZE; 1099 dumphdr->dump_pageshift = PAGESHIFT; 1100 dumphdr->dump_pagesize = PAGESIZE; 1101 dumphdr->dump_utsname = utsname; 1102 (void) strcpy(dumphdr->dump_platform, platform); 1103 dumpbuf.size = dumpbuf_iosize(maxphys); 1104 dumpbuf.start = kmem_alloc(dumpbuf.size, KM_SLEEP); 1105 dumpbuf.end = dumpbuf.start + dumpbuf.size; 1106 dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP); 1107 dumpcfg.helpermap = kmem_zalloc(BT_SIZEOFMAP(NCPU), KM_SLEEP); 1108 LOCK_INIT_HELD(&dumpcfg.helper_lock); 1109 dump_stack_scratch = kmem_alloc(STACK_BUF_SIZE, KM_SLEEP); 1110 (void) strncpy(dumphdr->dump_uuid, dump_get_uuid(), 1111 sizeof (dumphdr->dump_uuid)); 1112 } 1113 1114 npages = num_phys_pages(); 1115 1116 if (dumpcfg.bitmapsize != npages) { 1117 size_t rlen = CBUF_MAPP2R(P2ROUNDUP(npages, CBUF_MAPNP)); 1118 void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP); 1119 void *rmap = kmem_alloc(BT_SIZEOFMAP(rlen), KM_SLEEP); 1120 1121 if (dumpcfg.bitmap != NULL) 1122 kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg. 1123 bitmapsize)); 1124 if (dumpcfg.rbitmap != NULL) 1125 kmem_free(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg. 1126 rbitmapsize)); 1127 dumpcfg.bitmap = map; 1128 dumpcfg.bitmapsize = npages; 1129 dumpcfg.rbitmap = rmap; 1130 dumpcfg.rbitmapsize = rlen; 1131 } 1132 } 1133 1134 /* 1135 * Establish a new dump device. 1136 */ 1137 int 1138 dumpinit(vnode_t *vp, char *name, int justchecking) 1139 { 1140 vnode_t *cvp; 1141 vattr_t vattr; 1142 vnode_t *cdev_vp; 1143 int error = 0; 1144 1145 ASSERT(MUTEX_HELD(&dump_lock)); 1146 1147 dumphdr_init(); 1148 1149 cvp = common_specvp(vp); 1150 if (cvp == dumpvp) 1151 return (0); 1152 1153 /* 1154 * Determine whether this is a plausible dump device. We want either: 1155 * (1) a real device that's not mounted and has a cb_dump routine, or 1156 * (2) a swapfile on some filesystem that has a vop_dump routine. 1157 */ 1158 if ((error = VOP_OPEN(&cvp, FREAD | FWRITE, kcred, NULL)) != 0) 1159 return (error); 1160 1161 vattr.va_mask = AT_SIZE | AT_TYPE | AT_RDEV; 1162 if ((error = VOP_GETATTR(cvp, &vattr, 0, kcred, NULL)) == 0) { 1163 if (vattr.va_type == VBLK || vattr.va_type == VCHR) { 1164 if (devopsp[getmajor(vattr.va_rdev)]-> 1165 devo_cb_ops->cb_dump == nodev) 1166 error = ENOTSUP; 1167 else if (vfs_devismounted(vattr.va_rdev)) 1168 error = EBUSY; 1169 if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip), 1170 ZFS_DRIVER) == 0 && 1171 IS_SWAPVP(common_specvp(cvp))) 1172 error = EBUSY; 1173 } else { 1174 if (vn_matchopval(cvp, VOPNAME_DUMP, fs_nosys) || 1175 !IS_SWAPVP(cvp)) 1176 error = ENOTSUP; 1177 } 1178 } 1179 1180 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) 1181 error = ENOSPC; 1182 1183 if (error || justchecking) { 1184 (void) VOP_CLOSE(cvp, FREAD | FWRITE, 1, (offset_t)0, 1185 kcred, NULL); 1186 return (error); 1187 } 1188 1189 VN_HOLD(cvp); 1190 1191 if (dumpvp != NULL) 1192 dumpfini(); /* unconfigure the old dump device */ 1193 1194 dumpvp = cvp; 1195 dumpvp_size = vattr.va_size & -DUMP_OFFSET; 1196 dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP); 1197 (void) strcpy(dumppath, name); 1198 dumpbuf.iosize = 0; 1199 1200 /* 1201 * If the dump device is a block device, attempt to open up the 1202 * corresponding character device and determine its maximum transfer 1203 * size. We use this information to potentially resize dumpbuf to a 1204 * larger and more optimal size for performing i/o to the dump device. 1205 */ 1206 if (cvp->v_type == VBLK && 1207 (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) { 1208 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) { 1209 size_t blk_size; 1210 struct dk_cinfo dki; 1211 struct dk_minfo minf; 1212 1213 if (VOP_IOCTL(cdev_vp, DKIOCGMEDIAINFO, 1214 (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL) 1215 == 0 && minf.dki_lbsize != 0) 1216 blk_size = minf.dki_lbsize; 1217 else 1218 blk_size = DEV_BSIZE; 1219 1220 if (VOP_IOCTL(cdev_vp, DKIOCINFO, (intptr_t)&dki, 1221 FKIOCTL, kcred, NULL, NULL) == 0) { 1222 dumpbuf.iosize = dki.dki_maxtransfer * blk_size; 1223 dumpbuf_resize(); 1224 } 1225 /* 1226 * If we are working with a zvol then dumpify it 1227 * if it's not being used as swap. 1228 */ 1229 if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) { 1230 if (IS_SWAPVP(common_specvp(cvp))) 1231 error = EBUSY; 1232 else if ((error = VOP_IOCTL(cdev_vp, 1233 DKIOCDUMPINIT, NULL, FKIOCTL, kcred, 1234 NULL, NULL)) != 0) 1235 dumpfini(); 1236 } 1237 1238 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0, 1239 kcred, NULL); 1240 } 1241 1242 VN_RELE(cdev_vp); 1243 } 1244 1245 cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20); 1246 1247 dump_update_clevel(); 1248 1249 return (error); 1250 } 1251 1252 void 1253 dumpfini(void) 1254 { 1255 vattr_t vattr; 1256 boolean_t is_zfs = B_FALSE; 1257 vnode_t *cdev_vp; 1258 ASSERT(MUTEX_HELD(&dump_lock)); 1259 1260 kmem_free(dumppath, strlen(dumppath) + 1); 1261 1262 /* 1263 * Determine if we are using zvols for our dump device 1264 */ 1265 vattr.va_mask = AT_RDEV; 1266 if (VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL) == 0) { 1267 is_zfs = (getmajor(vattr.va_rdev) == 1268 ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE; 1269 } 1270 1271 /* 1272 * If we have a zvol dump device then we call into zfs so 1273 * that it may have a chance to cleanup. 1274 */ 1275 if (is_zfs && 1276 (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) { 1277 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) { 1278 (void) VOP_IOCTL(cdev_vp, DKIOCDUMPFINI, NULL, FKIOCTL, 1279 kcred, NULL, NULL); 1280 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0, 1281 kcred, NULL); 1282 } 1283 VN_RELE(cdev_vp); 1284 } 1285 1286 (void) VOP_CLOSE(dumpvp, FREAD | FWRITE, 1, (offset_t)0, kcred, NULL); 1287 1288 VN_RELE(dumpvp); 1289 1290 dumpvp = NULL; 1291 dumpvp_size = 0; 1292 dumppath = NULL; 1293 } 1294 1295 static offset_t 1296 dumpvp_flush(void) 1297 { 1298 size_t size = P2ROUNDUP(dumpbuf.cur - dumpbuf.start, PAGESIZE); 1299 hrtime_t iotime; 1300 int err; 1301 1302 if (dumpbuf.vp_off + size > dumpbuf.vp_limit) { 1303 dump_ioerr = ENOSPC; 1304 dumpbuf.vp_off = dumpbuf.vp_limit; 1305 } else if (size != 0) { 1306 iotime = gethrtime(); 1307 dumpsync.iowait += iotime - dumpsync.iowaitts; 1308 if (panicstr) 1309 err = VOP_DUMP(dumpvp, dumpbuf.start, 1310 lbtodb(dumpbuf.vp_off), btod(size), NULL); 1311 else 1312 err = vn_rdwr(UIO_WRITE, dumpbuf.cdev_vp != NULL ? 1313 dumpbuf.cdev_vp : dumpvp, dumpbuf.start, size, 1314 dumpbuf.vp_off, UIO_SYSSPACE, 0, dumpbuf.vp_limit, 1315 kcred, 0); 1316 if (err && dump_ioerr == 0) 1317 dump_ioerr = err; 1318 dumpsync.iowaitts = gethrtime(); 1319 dumpsync.iotime += dumpsync.iowaitts - iotime; 1320 dumpsync.nwrite += size; 1321 dumpbuf.vp_off += size; 1322 } 1323 dumpbuf.cur = dumpbuf.start; 1324 dump_timeleft = dump_timeout; 1325 return (dumpbuf.vp_off); 1326 } 1327 1328 /* maximize write speed by keeping seek offset aligned with size */ 1329 void 1330 dumpvp_write(const void *va, size_t size) 1331 { 1332 size_t len, off, sz; 1333 1334 while (size != 0) { 1335 len = MIN(size, dumpbuf.end - dumpbuf.cur); 1336 if (len == 0) { 1337 off = P2PHASE(dumpbuf.vp_off, dumpbuf.size); 1338 if (off == 0 || !ISP2(dumpbuf.size)) { 1339 (void) dumpvp_flush(); 1340 } else { 1341 sz = dumpbuf.size - off; 1342 dumpbuf.cur = dumpbuf.start + sz; 1343 (void) dumpvp_flush(); 1344 ovbcopy(dumpbuf.start + sz, dumpbuf.start, off); 1345 dumpbuf.cur += off; 1346 } 1347 } else { 1348 bcopy(va, dumpbuf.cur, len); 1349 va = (char *)va + len; 1350 dumpbuf.cur += len; 1351 size -= len; 1352 } 1353 } 1354 } 1355 1356 /*ARGSUSED*/ 1357 static void 1358 dumpvp_ksyms_write(const void *src, void *dst, size_t size) 1359 { 1360 dumpvp_write(src, size); 1361 } 1362 1363 /* 1364 * Mark 'pfn' in the bitmap and dump its translation table entry. 1365 */ 1366 void 1367 dump_addpage(struct as *as, void *va, pfn_t pfn) 1368 { 1369 mem_vtop_t mem_vtop; 1370 pgcnt_t bitnum; 1371 1372 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) { 1373 if (!BT_TEST(dumpcfg.bitmap, bitnum)) { 1374 dumphdr->dump_npages++; 1375 BT_SET(dumpcfg.bitmap, bitnum); 1376 } 1377 dumphdr->dump_nvtop++; 1378 mem_vtop.m_as = as; 1379 mem_vtop.m_va = va; 1380 mem_vtop.m_pfn = pfn; 1381 dumpvp_write(&mem_vtop, sizeof (mem_vtop_t)); 1382 } 1383 dump_timeleft = dump_timeout; 1384 } 1385 1386 /* 1387 * Mark 'pfn' in the bitmap 1388 */ 1389 void 1390 dump_page(pfn_t pfn) 1391 { 1392 pgcnt_t bitnum; 1393 1394 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) { 1395 if (!BT_TEST(dumpcfg.bitmap, bitnum)) { 1396 dumphdr->dump_npages++; 1397 BT_SET(dumpcfg.bitmap, bitnum); 1398 } 1399 } 1400 dump_timeleft = dump_timeout; 1401 } 1402 1403 /* 1404 * Dump the <as, va, pfn> information for a given address space. 1405 * SEGOP_DUMP() will call dump_addpage() for each page in the segment. 1406 */ 1407 static void 1408 dump_as(struct as *as) 1409 { 1410 struct seg *seg; 1411 1412 AS_LOCK_ENTER(as, RW_READER); 1413 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) { 1414 if (seg->s_as != as) 1415 break; 1416 if (seg->s_ops == NULL) 1417 continue; 1418 SEGOP_DUMP(seg); 1419 } 1420 AS_LOCK_EXIT(as); 1421 1422 if (seg != NULL) 1423 cmn_err(CE_WARN, "invalid segment %p in address space %p", 1424 (void *)seg, (void *)as); 1425 } 1426 1427 static int 1428 dump_process(pid_t pid) 1429 { 1430 proc_t *p = sprlock(pid); 1431 1432 if (p == NULL) 1433 return (-1); 1434 if (p->p_as != &kas) { 1435 mutex_exit(&p->p_lock); 1436 dump_as(p->p_as); 1437 mutex_enter(&p->p_lock); 1438 } 1439 1440 sprunlock(p); 1441 1442 return (0); 1443 } 1444 1445 /* 1446 * The following functions (dump_summary(), dump_ereports(), and 1447 * dump_messages()), write data to an uncompressed area within the 1448 * crashdump. The layout of these is 1449 * 1450 * +------------------------------------------------------------+ 1451 * | compressed pages | summary | ereports | messages | 1452 * +------------------------------------------------------------+ 1453 * 1454 * With the advent of saving a compressed crash dump by default, we 1455 * need to save a little more data to describe the failure mode in 1456 * an uncompressed buffer available before savecore uncompresses 1457 * the dump. Initially this is a copy of the stack trace. Additional 1458 * summary information should be added here. 1459 */ 1460 1461 void 1462 dump_summary(void) 1463 { 1464 u_offset_t dumpvp_start; 1465 summary_dump_t sd; 1466 1467 if (dumpvp == NULL || dumphdr == NULL) 1468 return; 1469 1470 dumpbuf.cur = dumpbuf.start; 1471 1472 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE + 1473 DUMP_ERPTSIZE); 1474 dumpvp_start = dumpbuf.vp_limit - DUMP_SUMMARYSIZE; 1475 dumpbuf.vp_off = dumpvp_start; 1476 1477 sd.sd_magic = SUMMARY_MAGIC; 1478 sd.sd_ssum = checksum32(dump_stack_scratch, STACK_BUF_SIZE); 1479 dumpvp_write(&sd, sizeof (sd)); 1480 dumpvp_write(dump_stack_scratch, STACK_BUF_SIZE); 1481 1482 sd.sd_magic = 0; /* indicate end of summary */ 1483 dumpvp_write(&sd, sizeof (sd)); 1484 (void) dumpvp_flush(); 1485 } 1486 1487 void 1488 dump_ereports(void) 1489 { 1490 u_offset_t dumpvp_start; 1491 erpt_dump_t ed; 1492 1493 if (dumpvp == NULL || dumphdr == NULL) 1494 return; 1495 1496 dumpbuf.cur = dumpbuf.start; 1497 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE); 1498 dumpvp_start = dumpbuf.vp_limit - DUMP_ERPTSIZE; 1499 dumpbuf.vp_off = dumpvp_start; 1500 1501 fm_ereport_dump(); 1502 if (panicstr) 1503 errorq_dump(); 1504 1505 bzero(&ed, sizeof (ed)); /* indicate end of ereports */ 1506 dumpvp_write(&ed, sizeof (ed)); 1507 (void) dumpvp_flush(); 1508 1509 if (!panicstr) { 1510 (void) VOP_PUTPAGE(dumpvp, dumpvp_start, 1511 (size_t)(dumpbuf.vp_off - dumpvp_start), 1512 B_INVAL | B_FORCE, kcred, NULL); 1513 } 1514 } 1515 1516 void 1517 dump_messages(void) 1518 { 1519 log_dump_t ld; 1520 mblk_t *mctl, *mdata; 1521 queue_t *q, *qlast; 1522 u_offset_t dumpvp_start; 1523 1524 if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL) 1525 return; 1526 1527 dumpbuf.cur = dumpbuf.start; 1528 dumpbuf.vp_limit = dumpvp_size - DUMP_OFFSET; 1529 dumpvp_start = dumpbuf.vp_limit - DUMP_LOGSIZE; 1530 dumpbuf.vp_off = dumpvp_start; 1531 1532 qlast = NULL; 1533 do { 1534 for (q = log_consq; q->q_next != qlast; q = q->q_next) 1535 continue; 1536 for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) { 1537 dump_timeleft = dump_timeout; 1538 mdata = mctl->b_cont; 1539 ld.ld_magic = LOG_MAGIC; 1540 ld.ld_msgsize = MBLKL(mctl->b_cont); 1541 ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl)); 1542 ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata)); 1543 dumpvp_write(&ld, sizeof (ld)); 1544 dumpvp_write(mctl->b_rptr, MBLKL(mctl)); 1545 dumpvp_write(mdata->b_rptr, MBLKL(mdata)); 1546 } 1547 } while ((qlast = q) != log_consq); 1548 1549 ld.ld_magic = 0; /* indicate end of messages */ 1550 dumpvp_write(&ld, sizeof (ld)); 1551 (void) dumpvp_flush(); 1552 if (!panicstr) { 1553 (void) VOP_PUTPAGE(dumpvp, dumpvp_start, 1554 (size_t)(dumpbuf.vp_off - dumpvp_start), 1555 B_INVAL | B_FORCE, kcred, NULL); 1556 } 1557 } 1558 1559 /* 1560 * The following functions are called on multiple CPUs during dump. 1561 * They must not use most kernel services, because all cross-calls are 1562 * disabled during panic. Therefore, blocking locks and cache flushes 1563 * will not work. 1564 */ 1565 1566 /* 1567 * Copy pages, trapping ECC errors. Also, for robustness, trap data 1568 * access in case something goes wrong in the hat layer and the 1569 * mapping is broken. 1570 */ 1571 static int 1572 dump_pagecopy(void *src, void *dst) 1573 { 1574 long *wsrc = (long *)src; 1575 long *wdst = (long *)dst; 1576 const ulong_t ncopies = PAGESIZE / sizeof (long); 1577 volatile int w = 0; 1578 volatile int ueoff = -1; 1579 on_trap_data_t otd; 1580 1581 if (on_trap(&otd, OT_DATA_EC | OT_DATA_ACCESS)) { 1582 if (ueoff == -1) 1583 ueoff = w * sizeof (long); 1584 /* report "bad ECC" or "bad address" */ 1585 #ifdef _LP64 1586 if (otd.ot_trap & OT_DATA_EC) 1587 wdst[w++] = 0x00badecc00badecc; 1588 else 1589 wdst[w++] = 0x00badadd00badadd; 1590 #else 1591 if (otd.ot_trap & OT_DATA_EC) 1592 wdst[w++] = 0x00badecc; 1593 else 1594 wdst[w++] = 0x00badadd; 1595 #endif 1596 } 1597 while (w < ncopies) { 1598 wdst[w] = wsrc[w]; 1599 w++; 1600 } 1601 no_trap(); 1602 return (ueoff); 1603 } 1604 1605 static void 1606 dumpsys_close_cq(cqueue_t *cq, int live) 1607 { 1608 if (live) { 1609 mutex_enter(&cq->mutex); 1610 atomic_dec_uint(&cq->open); 1611 cv_signal(&cq->cv); 1612 mutex_exit(&cq->mutex); 1613 } else { 1614 atomic_dec_uint(&cq->open); 1615 } 1616 } 1617 1618 static inline void 1619 dumpsys_spinlock(lock_t *lp) 1620 { 1621 uint_t backoff = 0; 1622 int loop_count = 0; 1623 1624 while (LOCK_HELD(lp) || !lock_spin_try(lp)) { 1625 if (++loop_count >= ncpus) { 1626 backoff = mutex_lock_backoff(0); 1627 loop_count = 0; 1628 } else { 1629 backoff = mutex_lock_backoff(backoff); 1630 } 1631 mutex_lock_delay(backoff); 1632 } 1633 } 1634 1635 static inline void 1636 dumpsys_spinunlock(lock_t *lp) 1637 { 1638 lock_clear(lp); 1639 } 1640 1641 static inline void 1642 dumpsys_lock(cqueue_t *cq, int live) 1643 { 1644 if (live) 1645 mutex_enter(&cq->mutex); 1646 else 1647 dumpsys_spinlock(&cq->spinlock); 1648 } 1649 1650 static inline void 1651 dumpsys_unlock(cqueue_t *cq, int live, int signal) 1652 { 1653 if (live) { 1654 if (signal) 1655 cv_signal(&cq->cv); 1656 mutex_exit(&cq->mutex); 1657 } else { 1658 dumpsys_spinunlock(&cq->spinlock); 1659 } 1660 } 1661 1662 static void 1663 dumpsys_wait_cq(cqueue_t *cq, int live) 1664 { 1665 if (live) { 1666 cv_wait(&cq->cv, &cq->mutex); 1667 } else { 1668 dumpsys_spinunlock(&cq->spinlock); 1669 while (cq->open) 1670 if (cq->first) 1671 break; 1672 dumpsys_spinlock(&cq->spinlock); 1673 } 1674 } 1675 1676 static void 1677 dumpsys_put_cq(cqueue_t *cq, cbuf_t *cp, int newstate, int live) 1678 { 1679 if (cp == NULL) 1680 return; 1681 1682 dumpsys_lock(cq, live); 1683 1684 if (cq->ts != 0) { 1685 cq->empty += gethrtime() - cq->ts; 1686 cq->ts = 0; 1687 } 1688 1689 cp->state = newstate; 1690 cp->next = NULL; 1691 if (cq->last == NULL) 1692 cq->first = cp; 1693 else 1694 cq->last->next = cp; 1695 cq->last = cp; 1696 1697 dumpsys_unlock(cq, live, 1); 1698 } 1699 1700 static cbuf_t * 1701 dumpsys_get_cq(cqueue_t *cq, int live) 1702 { 1703 cbuf_t *cp; 1704 hrtime_t now = gethrtime(); 1705 1706 dumpsys_lock(cq, live); 1707 1708 /* CONSTCOND */ 1709 while (1) { 1710 cp = (cbuf_t *)cq->first; 1711 if (cp == NULL) { 1712 if (cq->open == 0) 1713 break; 1714 dumpsys_wait_cq(cq, live); 1715 continue; 1716 } 1717 cq->first = cp->next; 1718 if (cq->first == NULL) { 1719 cq->last = NULL; 1720 cq->ts = now; 1721 } 1722 break; 1723 } 1724 1725 dumpsys_unlock(cq, live, cq->first != NULL || cq->open == 0); 1726 return (cp); 1727 } 1728 1729 /* 1730 * Send an error message to the console. If the main task is running 1731 * just write the message via uprintf. If a helper is running the 1732 * message has to be put on a queue for the main task. Setting fmt to 1733 * NULL means flush the error message buffer. If fmt is not NULL, just 1734 * add the text to the existing buffer. 1735 */ 1736 static void 1737 dumpsys_errmsg(helper_t *hp, const char *fmt, ...) 1738 { 1739 dumpsync_t *ds = hp->ds; 1740 cbuf_t *cp = hp->cperr; 1741 va_list adx; 1742 1743 if (hp->helper == MAINHELPER) { 1744 if (fmt != NULL) { 1745 if (ds->neednl) { 1746 uprintf("\n"); 1747 ds->neednl = 0; 1748 } 1749 va_start(adx, fmt); 1750 vuprintf(fmt, adx); 1751 va_end(adx); 1752 } 1753 } else if (fmt == NULL) { 1754 if (cp != NULL) { 1755 CQ_PUT(mainq, cp, CBUF_ERRMSG); 1756 hp->cperr = NULL; 1757 } 1758 } else { 1759 if (hp->cperr == NULL) { 1760 cp = CQ_GET(freebufq); 1761 hp->cperr = cp; 1762 cp->used = 0; 1763 } 1764 va_start(adx, fmt); 1765 cp->used += vsnprintf(cp->buf + cp->used, cp->size - cp->used, 1766 fmt, adx); 1767 va_end(adx); 1768 if ((cp->used + LOG_MSGSIZE) > cp->size) { 1769 CQ_PUT(mainq, cp, CBUF_ERRMSG); 1770 hp->cperr = NULL; 1771 } 1772 } 1773 } 1774 1775 /* 1776 * Write an output buffer to the dump file. If the main task is 1777 * running just write the data. If a helper is running the output is 1778 * placed on a queue for the main task. 1779 */ 1780 static void 1781 dumpsys_swrite(helper_t *hp, cbuf_t *cp, size_t used) 1782 { 1783 dumpsync_t *ds = hp->ds; 1784 1785 if (hp->helper == MAINHELPER) { 1786 HRSTART(ds->perpage, write); 1787 dumpvp_write(cp->buf, used); 1788 HRSTOP(ds->perpage, write); 1789 CQ_PUT(freebufq, cp, CBUF_FREEBUF); 1790 } else { 1791 cp->used = used; 1792 CQ_PUT(mainq, cp, CBUF_WRITE); 1793 } 1794 } 1795 1796 /* 1797 * Copy one page within the mapped range. The offset starts at 0 and 1798 * is relative to the first pfn. cp->buf + cp->off is the address of 1799 * the first pfn. If dump_pagecopy returns a UE offset, create an 1800 * error message. Returns the offset to the next pfn in the range 1801 * selected by the bitmap. 1802 */ 1803 static int 1804 dumpsys_copy_page(helper_t *hp, int offset) 1805 { 1806 cbuf_t *cp = hp->cpin; 1807 int ueoff; 1808 1809 ASSERT(cp->off + offset + PAGESIZE <= cp->size); 1810 ASSERT(BT_TEST(dumpcfg.bitmap, cp->bitnum)); 1811 1812 ueoff = dump_pagecopy(cp->buf + cp->off + offset, hp->page); 1813 1814 /* ueoff is the offset in the page to a UE error */ 1815 if (ueoff != -1) { 1816 uint64_t pa = ptob(cp->pfn) + offset + ueoff; 1817 1818 dumpsys_errmsg(hp, "cpu %d: memory error at PA 0x%08x.%08x\n", 1819 CPU->cpu_id, (uint32_t)(pa >> 32), (uint32_t)pa); 1820 } 1821 1822 /* 1823 * Advance bitnum and offset to the next input page for the 1824 * next call to this function. 1825 */ 1826 offset += PAGESIZE; 1827 cp->bitnum++; 1828 while (cp->off + offset < cp->size) { 1829 if (BT_TEST(dumpcfg.bitmap, cp->bitnum)) 1830 break; 1831 offset += PAGESIZE; 1832 cp->bitnum++; 1833 } 1834 1835 return (offset); 1836 } 1837 1838 /* 1839 * Read the helper queue, and copy one mapped page. Return 0 when 1840 * done. Return 1 when a page has been copied into hp->page. 1841 */ 1842 static int 1843 dumpsys_sread(helper_t *hp) 1844 { 1845 dumpsync_t *ds = hp->ds; 1846 1847 /* CONSTCOND */ 1848 while (1) { 1849 1850 /* Find the next input buffer. */ 1851 if (hp->cpin == NULL) { 1852 HRSTART(hp->perpage, inwait); 1853 1854 /* CONSTCOND */ 1855 while (1) { 1856 hp->cpin = CQ_GET(helperq); 1857 dump_timeleft = dump_timeout; 1858 1859 /* 1860 * NULL return means the helper queue 1861 * is closed and empty. 1862 */ 1863 if (hp->cpin == NULL) 1864 break; 1865 1866 /* Have input, check for dump I/O error. */ 1867 if (!dump_ioerr) 1868 break; 1869 1870 /* 1871 * If an I/O error occurs, stay in the 1872 * loop in order to empty the helper 1873 * queue. Return the buffers to the 1874 * main task to unmap and free it. 1875 */ 1876 hp->cpin->used = 0; 1877 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP); 1878 } 1879 HRSTOP(hp->perpage, inwait); 1880 1881 /* Stop here when the helper queue is closed. */ 1882 if (hp->cpin == NULL) 1883 break; 1884 1885 /* Set the offset=0 to get the first pfn. */ 1886 hp->in = 0; 1887 1888 /* Set the total processed to 0 */ 1889 hp->used = 0; 1890 } 1891 1892 /* Process the next page. */ 1893 if (hp->used < hp->cpin->used) { 1894 1895 /* 1896 * Get the next page from the input buffer and 1897 * return a copy. 1898 */ 1899 ASSERT(hp->in != -1); 1900 HRSTART(hp->perpage, copy); 1901 hp->in = dumpsys_copy_page(hp, hp->in); 1902 hp->used += PAGESIZE; 1903 HRSTOP(hp->perpage, copy); 1904 break; 1905 1906 } else { 1907 1908 /* 1909 * Done with the input. Flush the VM and 1910 * return the buffer to the main task. 1911 */ 1912 if (panicstr && hp->helper != MAINHELPER) 1913 hat_flush_range(kas.a_hat, 1914 hp->cpin->buf, hp->cpin->size); 1915 dumpsys_errmsg(hp, NULL); 1916 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP); 1917 hp->cpin = NULL; 1918 } 1919 } 1920 1921 return (hp->cpin != NULL); 1922 } 1923 1924 /* 1925 * Compress size bytes starting at buf with bzip2 1926 * mode: 1927 * BZ_RUN add one more compressed page 1928 * BZ_FINISH no more input, flush the state 1929 */ 1930 static void 1931 dumpsys_bzrun(helper_t *hp, void *buf, size_t size, int mode) 1932 { 1933 dumpsync_t *ds = hp->ds; 1934 const int CSIZE = sizeof (dumpcsize_t); 1935 bz_stream *ps = &hp->bzstream; 1936 int rc = 0; 1937 uint32_t csize; 1938 dumpcsize_t cs; 1939 1940 /* Set input pointers to new input page */ 1941 if (size > 0) { 1942 ps->avail_in = size; 1943 ps->next_in = buf; 1944 } 1945 1946 /* CONSTCOND */ 1947 while (1) { 1948 1949 /* Quit when all input has been consumed */ 1950 if (ps->avail_in == 0 && mode == BZ_RUN) 1951 break; 1952 1953 /* Get a new output buffer */ 1954 if (hp->cpout == NULL) { 1955 HRSTART(hp->perpage, outwait); 1956 hp->cpout = CQ_GET(freebufq); 1957 HRSTOP(hp->perpage, outwait); 1958 ps->avail_out = hp->cpout->size - CSIZE; 1959 ps->next_out = hp->cpout->buf + CSIZE; 1960 } 1961 1962 /* Compress input, or finalize */ 1963 HRSTART(hp->perpage, compress); 1964 rc = BZ2_bzCompress(ps, mode); 1965 HRSTOP(hp->perpage, compress); 1966 1967 /* Check for error */ 1968 if (mode == BZ_RUN && rc != BZ_RUN_OK) { 1969 dumpsys_errmsg(hp, "%d: BZ_RUN error %s at page %lx\n", 1970 hp->helper, BZ2_bzErrorString(rc), 1971 hp->cpin->pagenum); 1972 break; 1973 } 1974 1975 /* Write the buffer if it is full, or we are flushing */ 1976 if (ps->avail_out == 0 || mode == BZ_FINISH) { 1977 csize = hp->cpout->size - CSIZE - ps->avail_out; 1978 cs = DUMP_SET_TAG(csize, hp->tag); 1979 if (csize > 0) { 1980 (void) memcpy(hp->cpout->buf, &cs, CSIZE); 1981 dumpsys_swrite(hp, hp->cpout, csize + CSIZE); 1982 hp->cpout = NULL; 1983 } 1984 } 1985 1986 /* Check for final complete */ 1987 if (mode == BZ_FINISH) { 1988 if (rc == BZ_STREAM_END) 1989 break; 1990 if (rc != BZ_FINISH_OK) { 1991 dumpsys_errmsg(hp, "%d: BZ_FINISH error %s\n", 1992 hp->helper, BZ2_bzErrorString(rc)); 1993 break; 1994 } 1995 } 1996 } 1997 1998 /* Cleanup state and buffers */ 1999 if (mode == BZ_FINISH) { 2000 2001 /* Reset state so that it is re-usable. */ 2002 (void) BZ2_bzCompressReset(&hp->bzstream); 2003 2004 /* Give any unused outout buffer to the main task */ 2005 if (hp->cpout != NULL) { 2006 hp->cpout->used = 0; 2007 CQ_PUT(mainq, hp->cpout, CBUF_ERRMSG); 2008 hp->cpout = NULL; 2009 } 2010 } 2011 } 2012 2013 static void 2014 dumpsys_bz2compress(helper_t *hp) 2015 { 2016 dumpsync_t *ds = hp->ds; 2017 dumpstreamhdr_t sh; 2018 2019 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC); 2020 sh.stream_pagenum = (pgcnt_t)-1; 2021 sh.stream_npages = 0; 2022 hp->cpin = NULL; 2023 hp->cpout = NULL; 2024 hp->cperr = NULL; 2025 hp->in = 0; 2026 hp->out = 0; 2027 hp->bzstream.avail_in = 0; 2028 2029 /* Bump reference to mainq while we are running */ 2030 CQ_OPEN(mainq); 2031 2032 /* Get one page at a time */ 2033 while (dumpsys_sread(hp)) { 2034 if (sh.stream_pagenum != hp->cpin->pagenum) { 2035 sh.stream_pagenum = hp->cpin->pagenum; 2036 sh.stream_npages = btop(hp->cpin->used); 2037 dumpsys_bzrun(hp, &sh, sizeof (sh), BZ_RUN); 2038 } 2039 dumpsys_bzrun(hp, hp->page, PAGESIZE, 0); 2040 } 2041 2042 /* Done with input, flush any partial buffer */ 2043 if (sh.stream_pagenum != (pgcnt_t)-1) { 2044 dumpsys_bzrun(hp, NULL, 0, BZ_FINISH); 2045 dumpsys_errmsg(hp, NULL); 2046 } 2047 2048 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL); 2049 2050 /* Decrement main queue count, we are done */ 2051 CQ_CLOSE(mainq); 2052 } 2053 2054 /* 2055 * Compress with lzjb 2056 * write stream block if full or size==0 2057 * if csize==0 write stream header, else write <csize, data> 2058 * size==0 is a call to flush a buffer 2059 * hp->cpout is the buffer we are flushing or filling 2060 * hp->out is the next index to fill data 2061 * osize is either csize+data, or the size of a stream header 2062 */ 2063 static void 2064 dumpsys_lzjbrun(helper_t *hp, size_t csize, void *buf, size_t size) 2065 { 2066 dumpsync_t *ds = hp->ds; 2067 const int CSIZE = sizeof (dumpcsize_t); 2068 dumpcsize_t cs; 2069 size_t osize = csize > 0 ? CSIZE + size : size; 2070 2071 /* If flush, and there is no buffer, just return */ 2072 if (size == 0 && hp->cpout == NULL) 2073 return; 2074 2075 /* If flush, or cpout is full, write it out */ 2076 if (size == 0 || 2077 hp->cpout != NULL && hp->out + osize > hp->cpout->size) { 2078 2079 /* Set tag+size word at the front of the stream block. */ 2080 cs = DUMP_SET_TAG(hp->out - CSIZE, hp->tag); 2081 (void) memcpy(hp->cpout->buf, &cs, CSIZE); 2082 2083 /* Write block to dump file. */ 2084 dumpsys_swrite(hp, hp->cpout, hp->out); 2085 2086 /* Clear pointer to indicate we need a new buffer */ 2087 hp->cpout = NULL; 2088 2089 /* flushing, we are done */ 2090 if (size == 0) 2091 return; 2092 } 2093 2094 /* Get an output buffer if we dont have one. */ 2095 if (hp->cpout == NULL) { 2096 HRSTART(hp->perpage, outwait); 2097 hp->cpout = CQ_GET(freebufq); 2098 HRSTOP(hp->perpage, outwait); 2099 hp->out = CSIZE; 2100 } 2101 2102 /* Store csize word. This is the size of compressed data. */ 2103 if (csize > 0) { 2104 cs = DUMP_SET_TAG(csize, 0); 2105 (void) memcpy(hp->cpout->buf + hp->out, &cs, CSIZE); 2106 hp->out += CSIZE; 2107 } 2108 2109 /* Store the data. */ 2110 (void) memcpy(hp->cpout->buf + hp->out, buf, size); 2111 hp->out += size; 2112 } 2113 2114 static void 2115 dumpsys_lzjbcompress(helper_t *hp) 2116 { 2117 dumpsync_t *ds = hp->ds; 2118 size_t csize; 2119 dumpstreamhdr_t sh; 2120 2121 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC); 2122 sh.stream_pagenum = (pfn_t)-1; 2123 sh.stream_npages = 0; 2124 hp->cpin = NULL; 2125 hp->cpout = NULL; 2126 hp->cperr = NULL; 2127 hp->in = 0; 2128 hp->out = 0; 2129 2130 /* Bump reference to mainq while we are running */ 2131 CQ_OPEN(mainq); 2132 2133 /* Get one page at a time */ 2134 while (dumpsys_sread(hp)) { 2135 2136 /* Create a stream header for each new input map */ 2137 if (sh.stream_pagenum != hp->cpin->pagenum) { 2138 sh.stream_pagenum = hp->cpin->pagenum; 2139 sh.stream_npages = btop(hp->cpin->used); 2140 dumpsys_lzjbrun(hp, 0, &sh, sizeof (sh)); 2141 } 2142 2143 /* Compress one page */ 2144 HRSTART(hp->perpage, compress); 2145 csize = compress(hp->page, hp->lzbuf, PAGESIZE); 2146 HRSTOP(hp->perpage, compress); 2147 2148 /* Add csize+data to output block */ 2149 ASSERT(csize > 0 && csize <= PAGESIZE); 2150 dumpsys_lzjbrun(hp, csize, hp->lzbuf, csize); 2151 } 2152 2153 /* Done with input, flush any partial buffer */ 2154 if (sh.stream_pagenum != (pfn_t)-1) { 2155 dumpsys_lzjbrun(hp, 0, NULL, 0); 2156 dumpsys_errmsg(hp, NULL); 2157 } 2158 2159 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL); 2160 2161 /* Decrement main queue count, we are done */ 2162 CQ_CLOSE(mainq); 2163 } 2164 2165 /* 2166 * Dump helper called from panic_idle() to compress pages. CPUs in 2167 * this path must not call most kernel services. 2168 * 2169 * During panic, all but one of the CPUs is idle. These CPUs are used 2170 * as helpers working in parallel to copy and compress memory 2171 * pages. During a panic, however, these processors cannot call any 2172 * kernel services. This is because mutexes become no-ops during 2173 * panic, and, cross-call interrupts are inhibited. Therefore, during 2174 * panic dump the helper CPUs communicate with the panic CPU using 2175 * memory variables. All memory mapping and I/O is performed by the 2176 * panic CPU. 2177 * 2178 * At dump configuration time, helper_lock is set and helpers_wanted 2179 * is 0. dumpsys() decides whether to set helpers_wanted before 2180 * clearing helper_lock. 2181 * 2182 * At panic time, idle CPUs spin-wait on helper_lock, then alternately 2183 * take the lock and become a helper, or return. 2184 */ 2185 void 2186 dumpsys_helper() 2187 { 2188 dumpsys_spinlock(&dumpcfg.helper_lock); 2189 if (dumpcfg.helpers_wanted) { 2190 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper]; 2191 2192 for (hp = dumpcfg.helper; hp != hpend; hp++) { 2193 if (hp->helper == FREEHELPER) { 2194 hp->helper = CPU->cpu_id; 2195 BT_SET(dumpcfg.helpermap, CPU->cpu_seqid); 2196 2197 dumpsys_spinunlock(&dumpcfg.helper_lock); 2198 2199 if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2) 2200 dumpsys_lzjbcompress(hp); 2201 else 2202 dumpsys_bz2compress(hp); 2203 2204 hp->helper = DONEHELPER; 2205 return; 2206 } 2207 } 2208 2209 /* No more helpers are needed. */ 2210 dumpcfg.helpers_wanted = 0; 2211 2212 } 2213 dumpsys_spinunlock(&dumpcfg.helper_lock); 2214 } 2215 2216 /* 2217 * No-wait helper callable in spin loops. 2218 * 2219 * Do not wait for helper_lock. Just check helpers_wanted. The caller 2220 * may decide to continue. This is the "c)ontinue, s)ync, r)eset? s" 2221 * case. 2222 */ 2223 void 2224 dumpsys_helper_nw() 2225 { 2226 if (dumpcfg.helpers_wanted) 2227 dumpsys_helper(); 2228 } 2229 2230 /* 2231 * Dump helper for live dumps. 2232 * These run as a system task. 2233 */ 2234 static void 2235 dumpsys_live_helper(void *arg) 2236 { 2237 helper_t *hp = arg; 2238 2239 BT_ATOMIC_SET(dumpcfg.helpermap, CPU->cpu_seqid); 2240 if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2) 2241 dumpsys_lzjbcompress(hp); 2242 else 2243 dumpsys_bz2compress(hp); 2244 } 2245 2246 /* 2247 * Compress one page with lzjb (single threaded case) 2248 */ 2249 static void 2250 dumpsys_lzjb_page(helper_t *hp, cbuf_t *cp) 2251 { 2252 dumpsync_t *ds = hp->ds; 2253 uint32_t csize; 2254 2255 hp->helper = MAINHELPER; 2256 hp->in = 0; 2257 hp->used = 0; 2258 hp->cpin = cp; 2259 while (hp->used < cp->used) { 2260 HRSTART(hp->perpage, copy); 2261 hp->in = dumpsys_copy_page(hp, hp->in); 2262 hp->used += PAGESIZE; 2263 HRSTOP(hp->perpage, copy); 2264 2265 HRSTART(hp->perpage, compress); 2266 csize = compress(hp->page, hp->lzbuf, PAGESIZE); 2267 HRSTOP(hp->perpage, compress); 2268 2269 HRSTART(hp->perpage, write); 2270 dumpvp_write(&csize, sizeof (csize)); 2271 dumpvp_write(hp->lzbuf, csize); 2272 HRSTOP(hp->perpage, write); 2273 } 2274 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP); 2275 hp->cpin = NULL; 2276 } 2277 2278 /* 2279 * Main task to dump pages. This is called on the dump CPU. 2280 */ 2281 static void 2282 dumpsys_main_task(void *arg) 2283 { 2284 dumpsync_t *ds = arg; 2285 pgcnt_t pagenum = 0, bitnum = 0, hibitnum; 2286 dumpmlw_t mlw; 2287 cbuf_t *cp; 2288 pgcnt_t baseoff, pfnoff; 2289 pfn_t base, pfn; 2290 int i, dumpserial; 2291 2292 /* 2293 * Fall back to serial mode if there are no helpers. 2294 * dump_plat_mincpu can be set to 0 at any time. 2295 * dumpcfg.helpermap must contain at least one member. 2296 */ 2297 dumpserial = 1; 2298 2299 if (dump_plat_mincpu != 0 && dumpcfg.clevel != 0) { 2300 for (i = 0; i < BT_BITOUL(NCPU); ++i) { 2301 if (dumpcfg.helpermap[i] != 0) { 2302 dumpserial = 0; 2303 break; 2304 } 2305 } 2306 } 2307 2308 if (dumpserial) { 2309 dumpcfg.clevel = 0; 2310 if (dumpcfg.helper[0].lzbuf == NULL) 2311 dumpcfg.helper[0].lzbuf = dumpcfg.helper[1].page; 2312 } 2313 2314 dump_init_memlist_walker(&mlw); 2315 2316 for (;;) { 2317 int sec = (gethrtime() - ds->start) / NANOSEC; 2318 2319 /* 2320 * Render a simple progress display on the system console to 2321 * make clear to the operator that the system has not hung. 2322 * Emit an update when dump progress has advanced by one 2323 * percent, or when no update has been drawn in the last 2324 * second. 2325 */ 2326 if (ds->percent > ds->percent_done || sec > ds->sec_done) { 2327 ds->sec_done = sec; 2328 ds->percent_done = ds->percent; 2329 uprintf("^\rdumping: %2d:%02d %3d%% done", 2330 sec / 60, sec % 60, ds->percent); 2331 ds->neednl = 1; 2332 } 2333 2334 while (CQ_IS_EMPTY(mainq) && !CQ_IS_EMPTY(writerq)) { 2335 2336 /* the writerq never blocks */ 2337 cp = CQ_GET(writerq); 2338 if (cp == NULL) 2339 break; 2340 2341 dump_timeleft = dump_timeout; 2342 2343 HRSTART(ds->perpage, write); 2344 dumpvp_write(cp->buf, cp->used); 2345 HRSTOP(ds->perpage, write); 2346 2347 CQ_PUT(freebufq, cp, CBUF_FREEBUF); 2348 } 2349 2350 /* 2351 * Wait here for some buffers to process. Returns NULL 2352 * when all helpers have terminated and all buffers 2353 * have been processed. 2354 */ 2355 cp = CQ_GET(mainq); 2356 2357 if (cp == NULL) { 2358 2359 /* Drain the write queue. */ 2360 if (!CQ_IS_EMPTY(writerq)) 2361 continue; 2362 2363 /* Main task exits here. */ 2364 break; 2365 } 2366 2367 dump_timeleft = dump_timeout; 2368 2369 switch (cp->state) { 2370 2371 case CBUF_FREEMAP: 2372 2373 /* 2374 * Note that we drop CBUF_FREEMAP buffers on 2375 * the floor (they will not be on any cqueue) 2376 * when we no longer need them. 2377 */ 2378 if (bitnum >= dumpcfg.bitmapsize) 2379 break; 2380 2381 if (dump_ioerr) { 2382 bitnum = dumpcfg.bitmapsize; 2383 CQ_CLOSE(helperq); 2384 break; 2385 } 2386 2387 HRSTART(ds->perpage, bitmap); 2388 for (; bitnum < dumpcfg.bitmapsize; bitnum++) 2389 if (BT_TEST(dumpcfg.bitmap, bitnum)) 2390 break; 2391 HRSTOP(ds->perpage, bitmap); 2392 dump_timeleft = dump_timeout; 2393 2394 if (bitnum >= dumpcfg.bitmapsize) { 2395 CQ_CLOSE(helperq); 2396 break; 2397 } 2398 2399 /* 2400 * Try to map CBUF_MAPSIZE ranges. Can't 2401 * assume that memory segment size is a 2402 * multiple of CBUF_MAPSIZE. Can't assume that 2403 * the segment starts on a CBUF_MAPSIZE 2404 * boundary. 2405 */ 2406 pfn = dump_bitnum_to_pfn(bitnum, &mlw); 2407 ASSERT(pfn != PFN_INVALID); 2408 ASSERT(bitnum + mlw.mpleft <= dumpcfg.bitmapsize); 2409 2410 base = P2ALIGN(pfn, CBUF_MAPNP); 2411 if (base < mlw.mpaddr) { 2412 base = mlw.mpaddr; 2413 baseoff = P2PHASE(base, CBUF_MAPNP); 2414 } else { 2415 baseoff = 0; 2416 } 2417 2418 pfnoff = pfn - base; 2419 if (pfnoff + mlw.mpleft < CBUF_MAPNP) { 2420 hibitnum = bitnum + mlw.mpleft; 2421 cp->size = ptob(pfnoff + mlw.mpleft); 2422 } else { 2423 hibitnum = bitnum - pfnoff + CBUF_MAPNP - 2424 baseoff; 2425 cp->size = CBUF_MAPSIZE - ptob(baseoff); 2426 } 2427 2428 cp->pfn = pfn; 2429 cp->bitnum = bitnum++; 2430 cp->pagenum = pagenum++; 2431 cp->off = ptob(pfnoff); 2432 2433 for (; bitnum < hibitnum; bitnum++) 2434 if (BT_TEST(dumpcfg.bitmap, bitnum)) 2435 pagenum++; 2436 2437 dump_timeleft = dump_timeout; 2438 cp->used = ptob(pagenum - cp->pagenum); 2439 2440 HRSTART(ds->perpage, map); 2441 hat_devload(kas.a_hat, cp->buf, cp->size, base, 2442 PROT_READ, HAT_LOAD_NOCONSIST); 2443 HRSTOP(ds->perpage, map); 2444 2445 ds->pages_mapped += btop(cp->size); 2446 ds->pages_used += pagenum - cp->pagenum; 2447 2448 CQ_OPEN(mainq); 2449 2450 /* 2451 * If there are no helpers the main task does 2452 * non-streams lzjb compress. 2453 */ 2454 if (dumpserial) { 2455 dumpsys_lzjb_page(dumpcfg.helper, cp); 2456 break; 2457 } 2458 2459 /* pass mapped pages to a helper */ 2460 CQ_PUT(helperq, cp, CBUF_INREADY); 2461 2462 /* the last page was done */ 2463 if (bitnum >= dumpcfg.bitmapsize) 2464 CQ_CLOSE(helperq); 2465 2466 break; 2467 2468 case CBUF_USEDMAP: 2469 2470 ds->npages += btop(cp->used); 2471 2472 HRSTART(ds->perpage, unmap); 2473 hat_unload(kas.a_hat, cp->buf, cp->size, HAT_UNLOAD); 2474 HRSTOP(ds->perpage, unmap); 2475 2476 if (bitnum < dumpcfg.bitmapsize) 2477 CQ_PUT(mainq, cp, CBUF_FREEMAP); 2478 CQ_CLOSE(mainq); 2479 2480 ASSERT(ds->npages <= dumphdr->dump_npages); 2481 ds->percent = ds->npages * 100LL / dumphdr->dump_npages; 2482 break; 2483 2484 case CBUF_WRITE: 2485 2486 CQ_PUT(writerq, cp, CBUF_WRITE); 2487 break; 2488 2489 case CBUF_ERRMSG: 2490 2491 if (cp->used > 0) { 2492 cp->buf[cp->size - 2] = '\n'; 2493 cp->buf[cp->size - 1] = '\0'; 2494 if (ds->neednl) { 2495 uprintf("\n%s", cp->buf); 2496 ds->neednl = 0; 2497 } else { 2498 uprintf("%s", cp->buf); 2499 } 2500 /* wait for console output */ 2501 drv_usecwait(200000); 2502 dump_timeleft = dump_timeout; 2503 } 2504 CQ_PUT(freebufq, cp, CBUF_FREEBUF); 2505 break; 2506 2507 default: 2508 uprintf("dump: unexpected buffer state %d, " 2509 "buffer will be lost\n", cp->state); 2510 break; 2511 2512 } /* end switch */ 2513 } 2514 } 2515 2516 #ifdef COLLECT_METRICS 2517 size_t 2518 dumpsys_metrics(dumpsync_t *ds, char *buf, size_t size) 2519 { 2520 dumpcfg_t *cfg = &dumpcfg; 2521 int myid = CPU->cpu_seqid; 2522 int i, compress_ratio; 2523 int sec, iorate; 2524 helper_t *hp, *hpend = &cfg->helper[cfg->nhelper]; 2525 char *e = buf + size; 2526 char *p = buf; 2527 2528 sec = ds->elapsed / (1000 * 1000 * 1000ULL); 2529 if (sec < 1) 2530 sec = 1; 2531 2532 if (ds->iotime < 1) 2533 ds->iotime = 1; 2534 iorate = (ds->nwrite * 100000ULL) / ds->iotime; 2535 2536 compress_ratio = 100LL * ds->npages / btopr(ds->nwrite + 1); 2537 2538 #define P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0) 2539 2540 P("Master cpu_seqid,%d\n", CPU->cpu_seqid); 2541 P("Master cpu_id,%d\n", CPU->cpu_id); 2542 P("dump_flags,0x%x\n", dumphdr->dump_flags); 2543 P("dump_ioerr,%d\n", dump_ioerr); 2544 2545 P("Helpers:\n"); 2546 for (i = 0; i < ncpus; i++) { 2547 if ((i & 15) == 0) 2548 P(",,%03d,", i); 2549 if (i == myid) 2550 P(" M"); 2551 else if (BT_TEST(cfg->helpermap, i)) 2552 P("%4d", cpu_seq[i]->cpu_id); 2553 else 2554 P(" *"); 2555 if ((i & 15) == 15) 2556 P("\n"); 2557 } 2558 2559 P("ncbuf_used,%d\n", cfg->ncbuf_used); 2560 P("ncmap,%d\n", cfg->ncmap); 2561 2562 P("Found %ldM ranges,%ld\n", (CBUF_MAPSIZE / DUMP_1MB), cfg->found4m); 2563 P("Found small pages,%ld\n", cfg->foundsm); 2564 2565 P("Compression level,%d\n", cfg->clevel); 2566 P("Compression type,%s %s\n", cfg->clevel == 0 ? "serial" : "parallel", 2567 cfg->clevel >= DUMP_CLEVEL_BZIP2 ? "bzip2" : "lzjb"); 2568 P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio % 2569 100); 2570 P("nhelper_used,%d\n", cfg->nhelper_used); 2571 2572 P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100); 2573 P("..total bytes,%lld\n", (u_longlong_t)ds->nwrite); 2574 P("..total nsec,%lld\n", (u_longlong_t)ds->iotime); 2575 P("dumpbuf.iosize,%ld\n", dumpbuf.iosize); 2576 P("dumpbuf.size,%ld\n", dumpbuf.size); 2577 2578 P("Dump pages/sec,%llu\n", (u_longlong_t)ds->npages / sec); 2579 P("Dump pages,%llu\n", (u_longlong_t)ds->npages); 2580 P("Dump time,%d\n", sec); 2581 2582 if (ds->pages_mapped > 0) 2583 P("per-cent map utilization,%d\n", (int)((100 * ds->pages_used) 2584 / ds->pages_mapped)); 2585 2586 P("\nPer-page metrics:\n"); 2587 if (ds->npages > 0) { 2588 for (hp = cfg->helper; hp != hpend; hp++) { 2589 #define PERPAGE(x) ds->perpage.x += hp->perpage.x; 2590 PERPAGES; 2591 #undef PERPAGE 2592 } 2593 #define PERPAGE(x) \ 2594 P("%s nsec/page,%d\n", #x, (int)(ds->perpage.x / ds->npages)); 2595 PERPAGES; 2596 #undef PERPAGE 2597 P("freebufq.empty,%d\n", (int)(ds->freebufq.empty / 2598 ds->npages)); 2599 P("helperq.empty,%d\n", (int)(ds->helperq.empty / 2600 ds->npages)); 2601 P("writerq.empty,%d\n", (int)(ds->writerq.empty / 2602 ds->npages)); 2603 P("mainq.empty,%d\n", (int)(ds->mainq.empty / ds->npages)); 2604 2605 P("I/O wait nsec/page,%llu\n", (u_longlong_t)(ds->iowait / 2606 ds->npages)); 2607 } 2608 #undef P 2609 if (p < e) 2610 bzero(p, e - p); 2611 return (p - buf); 2612 } 2613 #endif /* COLLECT_METRICS */ 2614 2615 /* 2616 * Dump the system. 2617 */ 2618 void 2619 dumpsys(void) 2620 { 2621 dumpsync_t *ds = &dumpsync; 2622 taskq_t *livetaskq = NULL; 2623 pfn_t pfn; 2624 pgcnt_t bitnum; 2625 proc_t *p; 2626 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper]; 2627 cbuf_t *cp; 2628 pid_t npids, pidx; 2629 char *content; 2630 char *buf; 2631 size_t size; 2632 int save_dump_clevel; 2633 dumpmlw_t mlw; 2634 dumpcsize_t datatag; 2635 dumpdatahdr_t datahdr; 2636 2637 if (dumpvp == NULL || dumphdr == NULL) { 2638 uprintf("skipping system dump - no dump device configured\n"); 2639 if (panicstr) { 2640 dumpcfg.helpers_wanted = 0; 2641 dumpsys_spinunlock(&dumpcfg.helper_lock); 2642 } 2643 return; 2644 } 2645 dumpbuf.cur = dumpbuf.start; 2646 2647 /* clear the sync variables */ 2648 ASSERT(dumpcfg.nhelper > 0); 2649 bzero(ds, sizeof (*ds)); 2650 ds->dumpcpu = CPU->cpu_id; 2651 2652 /* 2653 * Calculate the starting block for dump. If we're dumping on a 2654 * swap device, start 1/5 of the way in; otherwise, start at the 2655 * beginning. And never use the first page -- it may be a disk label. 2656 */ 2657 if (dumpvp->v_flag & VISSWAP) 2658 dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET); 2659 else 2660 dumphdr->dump_start = DUMP_OFFSET; 2661 2662 dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED; 2663 dumphdr->dump_crashtime = gethrestime_sec(); 2664 dumphdr->dump_npages = 0; 2665 dumphdr->dump_nvtop = 0; 2666 bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize)); 2667 dump_timeleft = dump_timeout; 2668 2669 if (panicstr) { 2670 dumphdr->dump_flags &= ~DF_LIVE; 2671 (void) VOP_DUMPCTL(dumpvp, DUMP_FREE, NULL, NULL); 2672 (void) VOP_DUMPCTL(dumpvp, DUMP_ALLOC, NULL, NULL); 2673 (void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE, 2674 panicstr, panicargs); 2675 2676 } 2677 2678 if (dump_conflags & DUMP_ALL) 2679 content = "all"; 2680 else if (dump_conflags & DUMP_CURPROC) 2681 content = "kernel + curproc"; 2682 else 2683 content = "kernel"; 2684 uprintf("dumping to %s, offset %lld, content: %s\n", dumppath, 2685 dumphdr->dump_start, content); 2686 2687 /* Make sure nodename is current */ 2688 bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN); 2689 2690 /* 2691 * If this is a live dump, try to open a VCHR vnode for better 2692 * performance. We must take care to flush the buffer cache 2693 * first. 2694 */ 2695 if (!panicstr) { 2696 vnode_t *cdev_vp, *cmn_cdev_vp; 2697 2698 ASSERT(dumpbuf.cdev_vp == NULL); 2699 cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR); 2700 if (cdev_vp != NULL) { 2701 cmn_cdev_vp = common_specvp(cdev_vp); 2702 if (VOP_OPEN(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL) 2703 == 0) { 2704 if (vn_has_cached_data(dumpvp)) 2705 (void) pvn_vplist_dirty(dumpvp, 0, NULL, 2706 B_INVAL | B_TRUNC, kcred); 2707 dumpbuf.cdev_vp = cmn_cdev_vp; 2708 } else { 2709 VN_RELE(cdev_vp); 2710 } 2711 } 2712 } 2713 2714 /* 2715 * Store a hires timestamp so we can look it up during debugging. 2716 */ 2717 lbolt_debug_entry(); 2718 2719 /* 2720 * Leave room for the message and ereport save areas and terminal dump 2721 * header. 2722 */ 2723 dumpbuf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET - 2724 DUMP_ERPTSIZE; 2725 2726 /* 2727 * Write out the symbol table. It's no longer compressed, 2728 * so its 'size' and 'csize' are equal. 2729 */ 2730 dumpbuf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE; 2731 dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize = 2732 ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX); 2733 2734 /* 2735 * Write out the translation map. 2736 */ 2737 dumphdr->dump_map = dumpvp_flush(); 2738 dump_as(&kas); 2739 dumphdr->dump_nvtop += dump_plat_addr(); 2740 2741 /* 2742 * call into hat, which may have unmapped pages that also need to 2743 * be in the dump 2744 */ 2745 hat_dump(); 2746 2747 if (dump_conflags & DUMP_ALL) { 2748 mutex_enter(&pidlock); 2749 2750 for (npids = 0, p = practive; p != NULL; p = p->p_next) 2751 dumpcfg.pids[npids++] = p->p_pid; 2752 2753 mutex_exit(&pidlock); 2754 2755 for (pidx = 0; pidx < npids; pidx++) 2756 (void) dump_process(dumpcfg.pids[pidx]); 2757 2758 dump_init_memlist_walker(&mlw); 2759 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) { 2760 dump_timeleft = dump_timeout; 2761 pfn = dump_bitnum_to_pfn(bitnum, &mlw); 2762 /* 2763 * Some hypervisors do not have all pages available to 2764 * be accessed by the guest OS. Check for page 2765 * accessibility. 2766 */ 2767 if (plat_hold_page(pfn, PLAT_HOLD_NO_LOCK, NULL) != 2768 PLAT_HOLD_OK) 2769 continue; 2770 BT_SET(dumpcfg.bitmap, bitnum); 2771 } 2772 dumphdr->dump_npages = dumpcfg.bitmapsize; 2773 dumphdr->dump_flags |= DF_ALL; 2774 2775 } else if (dump_conflags & DUMP_CURPROC) { 2776 /* 2777 * Determine which pid is to be dumped. If we're panicking, we 2778 * dump the process associated with panic_thread (if any). If 2779 * this is a live dump, we dump the process associated with 2780 * curthread. 2781 */ 2782 npids = 0; 2783 if (panicstr) { 2784 if (panic_thread != NULL && 2785 panic_thread->t_procp != NULL && 2786 panic_thread->t_procp != &p0) { 2787 dumpcfg.pids[npids++] = 2788 panic_thread->t_procp->p_pid; 2789 } 2790 } else { 2791 dumpcfg.pids[npids++] = curthread->t_procp->p_pid; 2792 } 2793 2794 if (npids && dump_process(dumpcfg.pids[0]) == 0) 2795 dumphdr->dump_flags |= DF_CURPROC; 2796 else 2797 dumphdr->dump_flags |= DF_KERNEL; 2798 2799 } else { 2800 dumphdr->dump_flags |= DF_KERNEL; 2801 } 2802 2803 dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1; 2804 2805 /* 2806 * Write out the pfn table. 2807 */ 2808 dumphdr->dump_pfn = dumpvp_flush(); 2809 dump_init_memlist_walker(&mlw); 2810 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) { 2811 dump_timeleft = dump_timeout; 2812 if (!BT_TEST(dumpcfg.bitmap, bitnum)) 2813 continue; 2814 pfn = dump_bitnum_to_pfn(bitnum, &mlw); 2815 ASSERT(pfn != PFN_INVALID); 2816 dumpvp_write(&pfn, sizeof (pfn_t)); 2817 } 2818 dump_plat_pfn(); 2819 2820 /* 2821 * Write out all the pages. 2822 * Map pages, copy them handling UEs, compress, and write them out. 2823 * Cooperate with any helpers running on CPUs in panic_idle(). 2824 */ 2825 dumphdr->dump_data = dumpvp_flush(); 2826 2827 bzero(dumpcfg.helpermap, BT_SIZEOFMAP(NCPU)); 2828 ds->live = dumpcfg.clevel > 0 && 2829 (dumphdr->dump_flags & DF_LIVE) != 0; 2830 2831 save_dump_clevel = dumpcfg.clevel; 2832 if (panicstr) 2833 dumpsys_get_maxmem(); 2834 else if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2) 2835 dumpcfg.clevel = DUMP_CLEVEL_LZJB; 2836 2837 dumpcfg.nhelper_used = 0; 2838 for (hp = dumpcfg.helper; hp != hpend; hp++) { 2839 if (hp->page == NULL) { 2840 hp->helper = DONEHELPER; 2841 continue; 2842 } 2843 ++dumpcfg.nhelper_used; 2844 hp->helper = FREEHELPER; 2845 hp->taskqid = NULL; 2846 hp->ds = ds; 2847 bzero(&hp->perpage, sizeof (hp->perpage)); 2848 if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2) 2849 (void) BZ2_bzCompressReset(&hp->bzstream); 2850 } 2851 2852 CQ_OPEN(freebufq); 2853 CQ_OPEN(helperq); 2854 2855 dumpcfg.ncbuf_used = 0; 2856 for (cp = dumpcfg.cbuf; cp != &dumpcfg.cbuf[dumpcfg.ncbuf]; cp++) { 2857 if (cp->buf != NULL) { 2858 CQ_PUT(freebufq, cp, CBUF_FREEBUF); 2859 ++dumpcfg.ncbuf_used; 2860 } 2861 } 2862 2863 for (cp = dumpcfg.cmap; cp != &dumpcfg.cmap[dumpcfg.ncmap]; cp++) 2864 CQ_PUT(mainq, cp, CBUF_FREEMAP); 2865 2866 ds->start = gethrtime(); 2867 ds->iowaitts = ds->start; 2868 2869 /* start helpers */ 2870 if (ds->live) { 2871 int n = dumpcfg.nhelper_used; 2872 int pri = MINCLSYSPRI - 25; 2873 2874 livetaskq = taskq_create("LiveDump", n, pri, n, n, 2875 TASKQ_PREPOPULATE); 2876 for (hp = dumpcfg.helper; hp != hpend; hp++) { 2877 if (hp->page == NULL) 2878 continue; 2879 hp->helper = hp - dumpcfg.helper; 2880 hp->taskqid = taskq_dispatch(livetaskq, 2881 dumpsys_live_helper, (void *)hp, TQ_NOSLEEP); 2882 } 2883 2884 } else { 2885 if (panicstr) 2886 kmem_dump_begin(); 2887 dumpcfg.helpers_wanted = dumpcfg.clevel > 0; 2888 dumpsys_spinunlock(&dumpcfg.helper_lock); 2889 } 2890 2891 /* run main task */ 2892 dumpsys_main_task(ds); 2893 2894 ds->elapsed = gethrtime() - ds->start; 2895 if (ds->elapsed < 1) 2896 ds->elapsed = 1; 2897 2898 if (livetaskq != NULL) 2899 taskq_destroy(livetaskq); 2900 2901 if (ds->neednl) { 2902 uprintf("\n"); 2903 ds->neednl = 0; 2904 } 2905 2906 /* record actual pages dumped */ 2907 dumphdr->dump_npages = ds->npages; 2908 2909 /* platform-specific data */ 2910 dumphdr->dump_npages += dump_plat_data(dumpcfg.cbuf[0].buf); 2911 2912 /* note any errors by clearing DF_COMPLETE */ 2913 if (dump_ioerr || ds->npages < dumphdr->dump_npages) 2914 dumphdr->dump_flags &= ~DF_COMPLETE; 2915 2916 /* end of stream blocks */ 2917 datatag = 0; 2918 dumpvp_write(&datatag, sizeof (datatag)); 2919 2920 bzero(&datahdr, sizeof (datahdr)); 2921 2922 /* buffer for metrics */ 2923 buf = dumpcfg.cbuf[0].buf; 2924 size = MIN(dumpcfg.cbuf[0].size, DUMP_OFFSET - sizeof (dumphdr_t) - 2925 sizeof (dumpdatahdr_t)); 2926 2927 /* finish the kmem intercepts, collect kmem verbose info */ 2928 if (panicstr) { 2929 datahdr.dump_metrics = kmem_dump_finish(buf, size); 2930 buf += datahdr.dump_metrics; 2931 size -= datahdr.dump_metrics; 2932 } 2933 2934 /* record in the header whether this is a fault-management panic */ 2935 if (panicstr) 2936 dumphdr->dump_fm_panic = is_fm_panic(); 2937 2938 /* compression info in data header */ 2939 datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC; 2940 datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION; 2941 datahdr.dump_maxcsize = CBUF_SIZE; 2942 datahdr.dump_maxrange = CBUF_MAPSIZE / PAGESIZE; 2943 datahdr.dump_nstreams = dumpcfg.nhelper_used; 2944 datahdr.dump_clevel = dumpcfg.clevel; 2945 #ifdef COLLECT_METRICS 2946 if (dump_metrics_on) 2947 datahdr.dump_metrics += dumpsys_metrics(ds, buf, size); 2948 #endif 2949 datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data; 2950 2951 /* 2952 * Write out the initial and terminal dump headers. 2953 */ 2954 dumpbuf.vp_off = dumphdr->dump_start; 2955 dumpvp_write(dumphdr, sizeof (dumphdr_t)); 2956 (void) dumpvp_flush(); 2957 2958 dumpbuf.vp_limit = dumpvp_size; 2959 dumpbuf.vp_off = dumpbuf.vp_limit - DUMP_OFFSET; 2960 dumpvp_write(dumphdr, sizeof (dumphdr_t)); 2961 dumpvp_write(&datahdr, sizeof (dumpdatahdr_t)); 2962 dumpvp_write(dumpcfg.cbuf[0].buf, datahdr.dump_metrics); 2963 2964 (void) dumpvp_flush(); 2965 2966 uprintf("\r%3d%% done: %llu pages dumped, ", 2967 ds->percent_done, (u_longlong_t)ds->npages); 2968 2969 if (dump_ioerr == 0) { 2970 uprintf("dump succeeded\n"); 2971 } else { 2972 uprintf("dump failed: error %d\n", dump_ioerr); 2973 #ifdef DEBUG 2974 if (panicstr) 2975 debug_enter("dump failed"); 2976 #endif 2977 } 2978 2979 /* 2980 * Write out all undelivered messages. This has to be the *last* 2981 * thing we do because the dump process itself emits messages. 2982 */ 2983 if (panicstr) { 2984 dump_summary(); 2985 dump_ereports(); 2986 dump_messages(); 2987 } 2988 2989 delay(2 * hz); /* let people see the 'done' message */ 2990 dump_timeleft = 0; 2991 dump_ioerr = 0; 2992 2993 /* restore settings after live dump completes */ 2994 if (!panicstr) { 2995 dumpcfg.clevel = save_dump_clevel; 2996 2997 /* release any VCHR open of the dump device */ 2998 if (dumpbuf.cdev_vp != NULL) { 2999 (void) VOP_CLOSE(dumpbuf.cdev_vp, FREAD | FWRITE, 1, 0, 3000 kcred, NULL); 3001 VN_RELE(dumpbuf.cdev_vp); 3002 dumpbuf.cdev_vp = NULL; 3003 } 3004 } 3005 } 3006 3007 /* 3008 * This function is called whenever the memory size, as represented 3009 * by the phys_install list, changes. 3010 */ 3011 void 3012 dump_resize() 3013 { 3014 mutex_enter(&dump_lock); 3015 dumphdr_init(); 3016 dumpbuf_resize(); 3017 dump_update_clevel(); 3018 mutex_exit(&dump_lock); 3019 } 3020 3021 /* 3022 * This function allows for dynamic resizing of a dump area. It assumes that 3023 * the underlying device has update its appropriate size(9P). 3024 */ 3025 int 3026 dumpvp_resize() 3027 { 3028 int error; 3029 vattr_t vattr; 3030 3031 mutex_enter(&dump_lock); 3032 vattr.va_mask = AT_SIZE; 3033 if ((error = VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL)) != 0) { 3034 mutex_exit(&dump_lock); 3035 return (error); 3036 } 3037 3038 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) { 3039 mutex_exit(&dump_lock); 3040 return (ENOSPC); 3041 } 3042 3043 dumpvp_size = vattr.va_size & -DUMP_OFFSET; 3044 mutex_exit(&dump_lock); 3045 return (0); 3046 } 3047 3048 int 3049 dump_set_uuid(const char *uuidstr) 3050 { 3051 const char *ptr; 3052 int i; 3053 3054 if (uuidstr == NULL || strnlen(uuidstr, 36 + 1) != 36) 3055 return (EINVAL); 3056 3057 /* uuid_parse is not common code so check manually */ 3058 for (i = 0, ptr = uuidstr; i < 36; i++, ptr++) { 3059 switch (i) { 3060 case 8: 3061 case 13: 3062 case 18: 3063 case 23: 3064 if (*ptr != '-') 3065 return (EINVAL); 3066 break; 3067 3068 default: 3069 if (!isxdigit(*ptr)) 3070 return (EINVAL); 3071 break; 3072 } 3073 } 3074 3075 if (dump_osimage_uuid[0] != '\0') 3076 return (EALREADY); 3077 3078 (void) strncpy(dump_osimage_uuid, uuidstr, 36 + 1); 3079 3080 cmn_err(CE_CONT, "?This Solaris instance has UUID %s\n", 3081 dump_osimage_uuid); 3082 3083 return (0); 3084 } 3085 3086 const char * 3087 dump_get_uuid(void) 3088 { 3089 return (dump_osimage_uuid[0] != '\0' ? dump_osimage_uuid : ""); 3090 } 3091