1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2018 Joyent, Inc.
25 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
26 */
27
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/vm.h>
32 #include <sys/proc.h>
33 #include <sys/file.h>
34 #include <sys/conf.h>
35 #include <sys/kmem.h>
36 #include <sys/mem.h>
37 #include <sys/mman.h>
38 #include <sys/vnode.h>
39 #include <sys/errno.h>
40 #include <sys/memlist.h>
41 #include <sys/dumphdr.h>
42 #include <sys/dumpadm.h>
43 #include <sys/ksyms.h>
44 #include <sys/compress.h>
45 #include <sys/stream.h>
46 #include <sys/strsun.h>
47 #include <sys/cmn_err.h>
48 #include <sys/bitmap.h>
49 #include <sys/modctl.h>
50 #include <sys/utsname.h>
51 #include <sys/systeminfo.h>
52 #include <sys/vmem.h>
53 #include <sys/log.h>
54 #include <sys/var.h>
55 #include <sys/debug.h>
56 #include <sys/sunddi.h>
57 #include <fs/fs_subr.h>
58 #include <sys/fs/snode.h>
59 #include <sys/ontrap.h>
60 #include <sys/panic.h>
61 #include <sys/dkio.h>
62 #include <sys/vtoc.h>
63 #include <sys/errorq.h>
64 #include <sys/fm/util.h>
65 #include <sys/fs/zfs.h>
66
67 #include <vm/hat.h>
68 #include <vm/as.h>
69 #include <vm/page.h>
70 #include <vm/pvn.h>
71 #include <vm/seg.h>
72 #include <vm/seg_kmem.h>
73 #include <sys/clock_impl.h>
74 #include <sys/hold_page.h>
75 #include <sys/cpu.h>
76
77 #include <bzip2/bzlib.h>
78
79 #define ONE_GIG (1024 * 1024 * 1024UL)
80
81 /*
82 * Crash dump time is dominated by disk write time. To reduce this,
83 * the stronger compression method bzip2 is applied to reduce the dump
84 * size and hence reduce I/O time. However, bzip2 is much more
85 * computationally expensive than the existing lzjb algorithm, so to
86 * avoid increasing compression time, CPUs that are otherwise idle
87 * during panic are employed to parallelize the compression task.
88 * Many helper CPUs are needed to prevent bzip2 from being a
89 * bottleneck, and on systems with too few CPUs, the lzjb algorithm is
90 * parallelized instead. Lastly, I/O and compression are performed by
91 * different CPUs, and are hence overlapped in time, unlike the older
92 * serial code.
93 *
94  * Another important consideration is the speed of the dump
95  * device. Faster disks need fewer CPUs in order to benefit from
96  * parallel lzjb versus parallel bzip2. Therefore, the CPU count
97  * threshold for switching from parallel lzjb to parallel bzip2 is
98  * elevated for faster disks. The dump device speed is inferred from
99  * the setting for dumpbuf.iosize; see dump_update_clevel().
100 */
101
102 /*
103 * exported vars
104 */
105 kmutex_t dump_lock; /* lock for dump configuration */
106 dumphdr_t *dumphdr; /* dump header */
107 int dump_conflags = DUMP_KERNEL; /* dump configuration flags */
108 vnode_t *dumpvp; /* dump device vnode pointer */
109 u_offset_t dumpvp_size; /* size of dump device, in bytes */
110 char *dumppath; /* pathname of dump device */
111 int dump_timeout = 120; /* timeout for dumping pages */
112 int dump_timeleft; /* portion of dump_timeout remaining */
113 int dump_ioerr; /* dump i/o error */
114 int dump_check_used; /* enable check for used pages */
115 char *dump_stack_scratch; /* scratch area for saving stack summary */
116
117 /*
118 * Tunables for dump compression and parallelism. These can be set via
119 * /etc/system.
120 *
121 * dump_ncpu_low number of helpers for parallel lzjb
122 * This is also the minimum configuration.
123 *
124 * dump_bzip2_level bzip2 compression level: 1-9
125 * Higher numbers give greater compression, but take more memory
126 * and time. Memory used per helper is ~(dump_bzip2_level * 1MB).
127 *
128 * dump_plat_mincpu the cross-over limit for using bzip2 (per platform):
129 * if dump_plat_mincpu == 0, then always do single threaded dump
130 * if ncpu >= dump_plat_mincpu then try to use bzip2
131 *
132 * dump_metrics_on if set, metrics are collected in the kernel, passed
133 * to savecore via the dump file, and recorded by savecore in
134 * METRICS.txt.
135 */
136 uint_t dump_ncpu_low = 4; /* minimum config for parallel lzjb */
137 uint_t dump_bzip2_level = 1; /* bzip2 level (1-9) */
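
/*
 * A sketch of setting these tunables via /etc/system (the values here
 * are illustrative, not recommendations):
 *
 *	set dump_bzip2_level=2
 *	set dump_plat_mincpu=0	(forces a single-threaded dump)
 */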
138
139 /* Use dump_plat_mincpu_default unless this variable is set by /etc/system */
140 #define MINCPU_NOT_SET ((uint_t)-1)
141 uint_t dump_plat_mincpu = MINCPU_NOT_SET;
142
143 /* tunables for pre-reserved heap */
144 uint_t dump_kmem_permap = 1024;
145 uint_t dump_kmem_pages = 0;
146
147 /* Define multiple buffers per helper to avoid stalling */
148 #define NCBUF_PER_HELPER 2
149 #define NCMAP_PER_HELPER 4
150
151 /* minimum number of helpers configured */
152 #define MINHELPERS (dump_ncpu_low)
153 #define MINCBUFS (MINHELPERS * NCBUF_PER_HELPER)
154
155 /*
156 * Define constant parameters.
157 *
158 * CBUF_SIZE size of an output buffer
159 *
160 * CBUF_MAPSIZE size of virtual range for mapping pages
161 *
162 * CBUF_MAPNP size of virtual range in pages
163 *
164 */
165 #define DUMP_1KB ((size_t)1 << 10)
166 #define DUMP_1MB ((size_t)1 << 20)
167 #define CBUF_SIZE ((size_t)1 << 17)
168 #define CBUF_MAPSHIFT (22)
169 #define CBUF_MAPSIZE ((size_t)1 << CBUF_MAPSHIFT)
170 #define CBUF_MAPNP ((size_t)1 << (CBUF_MAPSHIFT - PAGESHIFT))
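
/*
 * Illustrative values, assuming the common PAGESHIFT of 12 (4KB
 * pages): CBUF_SIZE is 128KB, CBUF_MAPSIZE is 4MB, and CBUF_MAPNP is
 * 1024 pages per mapping range.
 */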
171
172 /*
173  * Compression metrics are accumulated as nanosecond subtotals. The
174 * results are normalized by the number of pages dumped. A report is
175 * generated when dumpsys() completes and is saved in the dump image
176 * after the trailing dump header.
177 *
178 * Metrics are always collected. Set the variable dump_metrics_on to
179 * cause metrics to be saved in the crash file, where savecore will
180  * save them in the file METRICS.txt.
181 */
182 #define PERPAGES \
183 PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \
184 PERPAGE(copy) PERPAGE(compress) \
185 PERPAGE(write) \
186 PERPAGE(inwait) PERPAGE(outwait)
187
188 typedef struct perpage {
189 #define PERPAGE(x) hrtime_t x;
190 PERPAGES
191 #undef PERPAGE
192 } perpage_t;
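
/*
 * For reference, the X-macro above expands perpage_t to the
 * equivalent of:
 *
 *	typedef struct perpage {
 *		hrtime_t bitmap, map, unmap;
 *		hrtime_t copy, compress, write;
 *		hrtime_t inwait, outwait;
 *	} perpage_t;
 */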
193
194 /*
195 * This macro controls the code generation for collecting dump
196 * performance information. By default, the code is generated, but
197 * automatic saving of the information is disabled. If dump_metrics_on
198 * is set to 1, the timing information is passed to savecore via the
199 * crash file, where it is appended to the file dump-dir/METRICS.txt.
200 */
201 #define COLLECT_METRICS
202
203 #ifdef COLLECT_METRICS
204 uint_t dump_metrics_on = 0; /* set to 1 to enable recording metrics */
205
206 #define HRSTART(v, m) v##ts.m = gethrtime()
207 #define HRSTOP(v, m) v.m += gethrtime() - v##ts.m
208 #define HRBEGIN(v, m, s) v##ts.m = gethrtime(); v.size += s
209 #define HREND(v, m) v.m += gethrtime() - v##ts.m
210 #define HRNORM(v, m, n) v.m /= (n)
211
212 #else
213 #define HRSTART(v, m)
214 #define HRSTOP(v, m)
215 #define HRBEGIN(v, m, s)
216 #define HREND(v, m)
217 #define HRNORM(v, m, n)
218 #endif /* COLLECT_METRICS */
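
/*
 * Example expansion: the main task times the write phase with
 * HRSTART(ds->perpage, write) ... HRSTOP(ds->perpage, write), which
 * paste "ts" onto the variable name and expand to
 *
 *	ds->perpagets.write = gethrtime();
 *	...
 *	ds->perpage.write += gethrtime() - ds->perpagets.write;
 *
 * so each perpage_t field accumulates the nanoseconds spent in its
 * phase.
 */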
219
220 /*
221 * Buffers for copying and compressing memory pages.
222 *
223 * cbuf_t buffer controllers: used for both input and output.
224 *
225 * The buffer state indicates how it is being used:
226 *
227 * CBUF_FREEMAP: CBUF_MAPSIZE virtual address range is available for
228 * mapping input pages.
229 *
230 * CBUF_INREADY: input pages are mapped and ready for compression by a
231 * helper.
232 *
233 * CBUF_USEDMAP: mapping has been consumed by a helper. Needs unmap.
234 *
235 * CBUF_FREEBUF: CBUF_SIZE output buffer, which is available.
236 *
237 * CBUF_WRITE: CBUF_SIZE block of compressed pages from a helper,
238 * ready to write out.
239 *
240 * CBUF_ERRMSG: CBUF_SIZE block of error messages from a helper
241 * (reports UE errors.)
242 */
243
244 typedef enum cbufstate {
245 CBUF_FREEMAP,
246 CBUF_INREADY,
247 CBUF_USEDMAP,
248 CBUF_FREEBUF,
249 CBUF_WRITE,
250 CBUF_ERRMSG
251 } cbufstate_t;
252
253 typedef struct cbuf cbuf_t;
254
255 struct cbuf {
256 cbuf_t *next; /* next in list */
257 cbufstate_t state; /* processing state */
258 size_t used; /* amount used */
259 size_t size; /* mem size */
260 char *buf; /* kmem or vmem */
261 pgcnt_t pagenum; /* index to pfn map */
262 pgcnt_t bitnum; /* first set bitnum */
263 pfn_t pfn; /* first pfn in mapped range */
264 int off; /* byte offset to first pfn */
265 };
266
267 static char dump_osimage_uuid[36 + 1];
268
269 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
270 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
271 ((ch) >= 'A' && (ch) <= 'F'))
272
273 /*
274 * cqueue_t queues: a uni-directional channel for communication
275 * from the master to helper tasks or vice-versa using put and
276 * get primitives. Both mappings and data buffers are passed via
277 * queues. Producers close a queue when done. The number of
278 * active producers is reference counted so the consumer can
279 * detect end of data. Concurrent access is mediated by atomic
280 * operations for panic dump, or mutex/cv for live dump.
281 *
282  * There are four queues, used as follows:
283 *
284 * Queue Dataflow NewState
285 * --------------------------------------------------
286 * mainq master -> master FREEMAP
287 * master has initialized or unmapped an input buffer
288 * --------------------------------------------------
289 * helperq master -> helper INREADY
290 * master has mapped input for use by helper
291 * --------------------------------------------------
292 * mainq master <- helper USEDMAP
293 * helper is done with input
294 * --------------------------------------------------
295 * freebufq master -> helper FREEBUF
296 * master has initialized or written an output buffer
297 * --------------------------------------------------
298 * mainq master <- helper WRITE
299 * block of compressed pages from a helper
300 * --------------------------------------------------
301 * mainq master <- helper ERRMSG
302 * error messages from a helper (memory error case)
303 * --------------------------------------------------
304 * writerq master <- master WRITE
305 * non-blocking queue of blocks to write
306 * --------------------------------------------------
307 */
308 typedef struct cqueue {
309 cbuf_t *volatile first; /* first in list */
310 cbuf_t *last; /* last in list */
311 hrtime_t ts; /* timestamp */
312 hrtime_t empty; /* total time empty */
313 kmutex_t mutex; /* live state lock */
314 kcondvar_t cv; /* live wait var */
315 lock_t spinlock; /* panic mode spin lock */
316 volatile uint_t open; /* producer ref count */
317 } cqueue_t;
318
319 /*
320 * Convenience macros for using the cqueue functions
321 * Note that the caller must have defined "dumpsync_t *ds"
322 */
323 #define CQ_IS_EMPTY(q) \
324 (ds->q.first == NULL)
325
326 #define CQ_OPEN(q) \
327 atomic_inc_uint(&ds->q.open)
328
329 #define CQ_CLOSE(q) \
330 dumpsys_close_cq(&ds->q, ds->live)
331
332 #define CQ_PUT(q, cp, st) \
333 dumpsys_put_cq(&ds->q, cp, st, ds->live)
334
335 #define CQ_GET(q) \
336 dumpsys_get_cq(&ds->q, ds->live)
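
/*
 * For example, a helper hands a full output buffer to the main task
 * with CQ_PUT(mainq, cp, CBUF_WRITE), which expands to
 * dumpsys_put_cq(&ds->mainq, cp, CBUF_WRITE, ds->live); the main task
 * later claims it with cp = CQ_GET(mainq).
 */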
337
338 /*
339 * Dynamic state when dumpsys() is running.
340 */
341 typedef struct dumpsync {
342 pgcnt_t npages; /* subtotal of pages dumped */
343 pgcnt_t pages_mapped; /* subtotal of pages mapped */
344 pgcnt_t pages_used; /* subtotal of pages used per map */
345 size_t nwrite; /* subtotal of bytes written */
346 uint_t live; /* running live dump */
347 uint_t neednl; /* will need to print a newline */
348 uint_t percent; /* dump progress */
349 uint_t percent_done; /* dump progress reported */
350 int sec_done; /* dump progress last report time */
351 cqueue_t freebufq; /* free kmem bufs for writing */
352 cqueue_t mainq; /* input for main task */
353 cqueue_t helperq; /* input for helpers */
354 cqueue_t writerq; /* input for writer */
355 hrtime_t start; /* start time */
356 hrtime_t elapsed; /* elapsed time when completed */
357 hrtime_t iotime; /* time spent writing nwrite bytes */
358 hrtime_t iowait; /* time spent waiting for output */
359 hrtime_t iowaitts; /* iowait timestamp */
360 perpage_t perpage; /* metrics */
361 perpage_t perpagets;
362 int dumpcpu; /* master cpu */
363 } dumpsync_t;
364
365 static dumpsync_t dumpsync; /* synchronization vars */
366
367 /*
368 * helper_t helpers: contains the context for a stream. CPUs run in
369 * parallel at dump time; each CPU creates a single stream of
370 * compression data. Stream data is divided into CBUF_SIZE blocks.
371 * The blocks are written in order within a stream. But, blocks from
372 * multiple streams can be interleaved. Each stream is identified by a
373 * unique tag.
374 */
375 typedef struct helper {
376 int helper; /* bound helper id */
377 int tag; /* compression stream tag */
378 perpage_t perpage; /* per page metrics */
379 perpage_t perpagets; /* per page metrics (timestamps) */
380 taskqid_t taskqid; /* live dump task ptr */
381 int in, out; /* buffer offsets */
382 cbuf_t *cpin, *cpout, *cperr; /* cbuf objects in process */
383 dumpsync_t *ds; /* pointer to sync vars */
384 size_t used; /* counts input consumed */
385 char *page; /* buffer for page copy */
386 char *lzbuf; /* lzjb output */
387 bz_stream bzstream; /* bzip2 state */
388 } helper_t;
389
390 #define MAINHELPER (-1) /* helper is also the main task */
391 #define FREEHELPER (-2) /* unbound helper */
392 #define DONEHELPER (-3) /* helper finished */
393
394 /*
395 * configuration vars for dumpsys
396 */
397 typedef struct dumpcfg {
398 int threshold; /* ncpu threshold for bzip2 */
399 int nhelper; /* number of helpers */
400 int nhelper_used; /* actual number of helpers used */
401 	int ncmap; /* number of VA pages for compression */
402 int ncbuf; /* number of bufs for compression */
403 int ncbuf_used; /* number of bufs in use */
404 uint_t clevel; /* dump compression level */
405 helper_t *helper; /* array of helpers */
406 cbuf_t *cmap; /* array of input (map) buffers */
407 cbuf_t *cbuf; /* array of output buffers */
408 ulong_t *helpermap; /* set of dumpsys helper CPU ids */
409 ulong_t *bitmap; /* bitmap for marking pages to dump */
410 ulong_t *rbitmap; /* bitmap for used CBUF_MAPSIZE ranges */
411 pgcnt_t bitmapsize; /* size of bitmap */
412 pgcnt_t rbitmapsize; /* size of bitmap for ranges */
413 	pgcnt_t found4m; /* number of ranges allocated by dump */
414 	pgcnt_t foundsm; /* number of small pages allocated by dump */
415 pid_t *pids; /* list of process IDs at dump time */
416 size_t maxsize; /* memory size needed at dump time */
417 size_t maxvmsize; /* size of reserved VM */
418 char *maxvm; /* reserved VM for spare pages */
419 lock_t helper_lock; /* protect helper state */
420 char helpers_wanted; /* flag to enable parallelism */
421 } dumpcfg_t;
422
423 static dumpcfg_t dumpcfg; /* config vars */
424
425 /*
426 * The dump I/O buffer.
427 *
428  * There is one I/O buffer used by dumpvp_write and dumpvp_flush. It is
429 * sized according to the optimum device transfer speed.
430 */
431 typedef struct dumpbuf {
432 vnode_t *cdev_vp; /* VCHR open of the dump device */
433 len_t vp_limit; /* maximum write offset */
434 offset_t vp_off; /* current dump device offset */
435 char *cur; /* dump write pointer */
436 char *start; /* dump buffer address */
437 char *end; /* dump buffer end */
438 size_t size; /* size of dumpbuf in bytes */
439 size_t iosize; /* best transfer size for device */
440 } dumpbuf_t;
441
442 dumpbuf_t dumpbuf; /* I/O buffer */
443
444 /*
445  * For a parallel dump, this defines the maximum time the main task
446  * thread will wait for at least one helper to register in
447  * dumpcfg.helpermap before assuming there are no helpers and falling
448  * back to serial mode. The value is chosen arbitrarily and provides a
449  * *really* long wait for any available helper to register.
450 */
451 #define DUMP_HELPER_MAX_WAIT 1000 /* millisec */
452
453 /*
454 * The dump I/O buffer must be at least one page, at most xfer_size
455 * bytes, and should scale with physmem in between. The transfer size
456 * passed in will either represent a global default (maxphys) or the
457 * best size for the device. The size of the dumpbuf I/O buffer is
458 * limited by dumpbuf_limit (8MB by default) because the dump
459 * performance saturates beyond a certain size. The default is to
460 * select 1/4096 of the memory.
461 */
462 static int dumpbuf_fraction = 12; /* memory size scale factor */
463 static size_t dumpbuf_limit = 8 * DUMP_1MB; /* max I/O buf size */
464
465 static size_t
466 dumpbuf_iosize(size_t xfer_size)
467 {
468 size_t iosize = ptob(physmem >> dumpbuf_fraction);
469
470 if (iosize < PAGESIZE)
471 iosize = PAGESIZE;
472 else if (iosize > xfer_size)
473 iosize = xfer_size;
474 if (iosize > dumpbuf_limit)
475 iosize = dumpbuf_limit;
476 return (iosize & PAGEMASK);
477 }
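
/*
 * Worked example, assuming 4KB pages: with 8GB of physmem (2^21
 * pages), ptob(physmem >> 12) yields 512 pages, a 2MB buffer, which
 * is under dumpbuf_limit and used as-is. At 64GB the raw result
 * would be 16MB and is clamped to the 8MB dumpbuf_limit.
 */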
478
479 /*
480 * resize the I/O buffer
481 */
482 static void
483 dumpbuf_resize(void)
484 {
485 char *old_buf = dumpbuf.start;
486 size_t old_size = dumpbuf.size;
487 char *new_buf;
488 size_t new_size;
489
490 ASSERT(MUTEX_HELD(&dump_lock));
491
492 new_size = dumpbuf_iosize(MAX(dumpbuf.iosize, maxphys));
493 if (new_size <= old_size)
494 return; /* no need to reallocate buffer */
495
496 new_buf = kmem_alloc(new_size, KM_SLEEP);
497 dumpbuf.size = new_size;
498 dumpbuf.start = new_buf;
499 dumpbuf.end = new_buf + new_size;
500 kmem_free(old_buf, old_size);
501 }
502
503 /*
504 * dump_update_clevel is called when dumpadm configures the dump device.
505  * Calculate the number of helpers and buffers.
506  * Allocate the minimum configuration for now.
507 *
508 * When the dump file is configured we reserve a minimum amount of
509 * memory for use at crash time. But we reserve VA for all the memory
510 * we really want in order to do the fastest dump possible. The VA is
511 * backed by pages not being dumped, according to the bitmap. If
512 * there is insufficient spare memory, however, we fall back to the
513 * minimum.
514 *
515 * Live dump (savecore -L) always uses the minimum config.
516 *
517 * clevel 0 is single threaded lzjb
518 * clevel 1 is parallel lzjb
519 * clevel 2 is parallel bzip2
520 *
521 * The ncpu threshold is selected with dump_plat_mincpu.
522 * On OPL, set_platform_defaults() overrides the sun4u setting.
523 * The actual values are defined via DUMP_PLAT_*_MINCPU macros.
524 *
525 * Architecture Threshold Algorithm
526 * sun4u < 51 parallel lzjb
527 * sun4u >= 51 parallel bzip2(*)
528 * sun4u OPL < 8 parallel lzjb
529 * sun4u OPL >= 8 parallel bzip2(*)
530 * sun4v < 128 parallel lzjb
531 * sun4v >= 128 parallel bzip2(*)
532 * x86 < 11 parallel lzjb
533 * x86 >= 11 parallel bzip2(*)
534 * 32-bit N/A single-threaded lzjb
535 *
536 * (*) bzip2 is only chosen if there is sufficient available
537 * memory for buffers at dump time. See dumpsys_get_maxmem().
538 *
539 * Faster dump devices have larger I/O buffers. The threshold value is
540 * increased according to the size of the dump I/O buffer, because
541 * parallel lzjb performs better with faster disks. For buffers >= 1MB
542 * the threshold is 3X; for buffers >= 256K threshold is 2X.
543 *
544 * For parallel dumps, the number of helpers is ncpu-1. The CPU
545 * running panic runs the main task. For single-threaded dumps, the
546  * panic CPU does lzjb compression (it is tagged as MAINHELPER).
547 *
548 * Need multiple buffers per helper so that they do not block waiting
549 * for the main task.
550 * parallel single-threaded
551 * Number of output buffers: nhelper*2 1
552 * Number of mapping buffers: nhelper*4 1
553 *
554 */
555 static void
556 dump_update_clevel()
557 {
558 int tag;
559 size_t bz2size;
560 helper_t *hp, *hpend;
561 cbuf_t *cp, *cpend;
562 dumpcfg_t *old = &dumpcfg;
563 dumpcfg_t newcfg = *old;
564 dumpcfg_t *new = &newcfg;
565
566 ASSERT(MUTEX_HELD(&dump_lock));
567
568 /*
569 * Free the previously allocated bufs and VM.
570 */
571 if (old->helper != NULL) {
572
573 /* helpers */
574 hpend = &old->helper[old->nhelper];
575 for (hp = old->helper; hp != hpend; hp++) {
576 if (hp->lzbuf != NULL)
577 kmem_free(hp->lzbuf, PAGESIZE);
578 if (hp->page != NULL)
579 kmem_free(hp->page, PAGESIZE);
580 }
581 kmem_free(old->helper, old->nhelper * sizeof (helper_t));
582
583 /* VM space for mapping pages */
584 cpend = &old->cmap[old->ncmap];
585 for (cp = old->cmap; cp != cpend; cp++)
586 vmem_xfree(heap_arena, cp->buf, CBUF_MAPSIZE);
587 kmem_free(old->cmap, old->ncmap * sizeof (cbuf_t));
588
589 /* output bufs */
590 cpend = &old->cbuf[old->ncbuf];
591 for (cp = old->cbuf; cp != cpend; cp++)
592 if (cp->buf != NULL)
593 kmem_free(cp->buf, cp->size);
594 kmem_free(old->cbuf, old->ncbuf * sizeof (cbuf_t));
595
596 /* reserved VM for dumpsys_get_maxmem */
597 if (old->maxvmsize > 0)
598 vmem_xfree(heap_arena, old->maxvm, old->maxvmsize);
599 }
600
601 /*
602 * Allocate memory and VM.
603 * One CPU runs dumpsys, the rest are helpers.
604 */
605 new->nhelper = ncpus - 1;
606 if (new->nhelper < 1)
607 new->nhelper = 1;
608
609 if (new->nhelper > DUMP_MAX_NHELPER)
610 new->nhelper = DUMP_MAX_NHELPER;
611
612 /* use platform default, unless /etc/system overrides */
613 if (dump_plat_mincpu == MINCPU_NOT_SET)
614 dump_plat_mincpu = dump_plat_mincpu_default;
615
616 /* increase threshold for faster disks */
617 new->threshold = dump_plat_mincpu;
618 if (dumpbuf.iosize >= DUMP_1MB)
619 new->threshold *= 3;
620 else if (dumpbuf.iosize >= (256 * DUMP_1KB))
621 new->threshold *= 2;
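
	/*
	 * E.g. on x86, where the table above gives a cut-over of 11
	 * CPUs, a dump device with a >= 1MB I/O buffer raises the
	 * bzip2 threshold to 33 CPUs.
	 */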
622
623 /* figure compression level based upon the computed threshold. */
624 if (dump_plat_mincpu == 0 || new->nhelper < 2) {
625 new->clevel = 0;
626 new->nhelper = 1;
627 } else if ((new->nhelper + 1) >= new->threshold) {
628 new->clevel = DUMP_CLEVEL_BZIP2;
629 } else {
630 new->clevel = DUMP_CLEVEL_LZJB;
631 }
632
633 if (new->clevel == 0) {
634 new->ncbuf = 1;
635 new->ncmap = 1;
636 } else {
637 new->ncbuf = NCBUF_PER_HELPER * new->nhelper;
638 new->ncmap = NCMAP_PER_HELPER * new->nhelper;
639 }
640
641 /*
642 * Allocate new data structures and buffers for MINHELPERS,
643 * and also figure the max desired size.
644 */
645 bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
646 new->maxsize = 0;
647 new->maxvmsize = 0;
648 new->maxvm = NULL;
649 tag = 1;
650 new->helper = kmem_zalloc(new->nhelper * sizeof (helper_t), KM_SLEEP);
651 hpend = &new->helper[new->nhelper];
652 for (hp = new->helper; hp != hpend; hp++) {
653 hp->tag = tag++;
654 if (hp < &new->helper[MINHELPERS]) {
655 hp->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
656 hp->page = kmem_alloc(PAGESIZE, KM_SLEEP);
657 } else if (new->clevel < DUMP_CLEVEL_BZIP2) {
658 new->maxsize += 2 * PAGESIZE;
659 } else {
660 new->maxsize += PAGESIZE;
661 }
662 if (new->clevel >= DUMP_CLEVEL_BZIP2)
663 new->maxsize += bz2size;
664 }
665
666 new->cbuf = kmem_zalloc(new->ncbuf * sizeof (cbuf_t), KM_SLEEP);
667 cpend = &new->cbuf[new->ncbuf];
668 for (cp = new->cbuf; cp != cpend; cp++) {
669 cp->state = CBUF_FREEBUF;
670 cp->size = CBUF_SIZE;
671 if (cp < &new->cbuf[MINCBUFS])
672 cp->buf = kmem_alloc(cp->size, KM_SLEEP);
673 else
674 new->maxsize += cp->size;
675 }
676
677 new->cmap = kmem_zalloc(new->ncmap * sizeof (cbuf_t), KM_SLEEP);
678 cpend = &new->cmap[new->ncmap];
679 for (cp = new->cmap; cp != cpend; cp++) {
680 cp->state = CBUF_FREEMAP;
681 cp->size = CBUF_MAPSIZE;
682 cp->buf = vmem_xalloc(heap_arena, CBUF_MAPSIZE, CBUF_MAPSIZE,
683 0, 0, NULL, NULL, VM_SLEEP);
684 }
685
686 /* reserve VA to be backed with spare pages at crash time */
687 if (new->maxsize > 0) {
688 new->maxsize = P2ROUNDUP(new->maxsize, PAGESIZE);
689 new->maxvmsize = P2ROUNDUP(new->maxsize, CBUF_MAPSIZE);
690 new->maxvm = vmem_xalloc(heap_arena, new->maxvmsize,
691 CBUF_MAPSIZE, 0, 0, NULL, NULL, VM_SLEEP);
692 }
693
694 /*
695 * Reserve memory for kmem allocation calls made during crash dump. The
696 * hat layer allocates memory for each mapping created, and the I/O path
697 * allocates buffers and data structs.
698 *
699 * On larger systems, we easily exceed the lower amount, so we need some
700 * more space; the cut-over point is relatively arbitrary. If we run
701 * out, the only impact is that kmem state in the dump becomes
702 * inconsistent.
703 */
704
705 if (dump_kmem_pages == 0) {
706 if (physmem > (16 * ONE_GIG) / PAGESIZE)
707 dump_kmem_pages = 20;
708 else
709 dump_kmem_pages = 8;
710 }
711
712 kmem_dump_init((new->ncmap * dump_kmem_permap) +
713 (dump_kmem_pages * PAGESIZE));
714
715 /* set new config pointers */
716 *old = *new;
717 }
718
719 /*
720 * Define a struct memlist walker to optimize bitnum to pfn
721 * lookup. The walker maintains the state of the list traversal.
722 */
723 typedef struct dumpmlw {
724 struct memlist *mp; /* current memlist */
725 pgcnt_t basenum; /* bitnum base offset */
726 pgcnt_t mppages; /* current memlist size */
727 pgcnt_t mpleft; /* size to end of current memlist */
728 pfn_t mpaddr; /* first pfn in memlist */
729 } dumpmlw_t;
730
731 /* initialize the walker */
732 static inline void
733 dump_init_memlist_walker(dumpmlw_t *pw)
734 {
735 pw->mp = phys_install;
736 pw->basenum = 0;
737 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
738 pw->mpleft = pw->mppages;
739 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
740 }
741
742 /*
743 * Lookup pfn given bitnum. The memlist can be quite long on some
744 * systems (e.g.: one per board). To optimize sequential lookups, the
745 * caller initializes and presents a memlist walker.
746 */
747 static pfn_t
748 dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw)
749 {
750 bitnum -= pw->basenum;
751 while (pw->mp != NULL) {
752 if (bitnum < pw->mppages) {
753 pw->mpleft = pw->mppages - bitnum;
754 return (pw->mpaddr + bitnum);
755 }
756 bitnum -= pw->mppages;
757 pw->basenum += pw->mppages;
758 pw->mp = pw->mp->ml_next;
759 if (pw->mp != NULL) {
760 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
761 pw->mpleft = pw->mppages;
762 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
763 }
764 }
765 return (PFN_INVALID);
766 }
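
/*
 * A minimal usage sketch (dumpsys_get_maxmem() below follows this
 * pattern, though it advances bitnum by whole ranges):
 *
 *	dumpmlw_t mlw;
 *	dump_init_memlist_walker(&mlw);
 *	for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++)
 *		pfn = dump_bitnum_to_pfn(bitnum, &mlw);
 */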
767
768 static pgcnt_t
769 dump_pfn_to_bitnum(pfn_t pfn)
770 {
771 struct memlist *mp;
772 pgcnt_t bitnum = 0;
773
774 for (mp = phys_install; mp != NULL; mp = mp->ml_next) {
775 if (pfn >= (mp->ml_address >> PAGESHIFT) &&
776 pfn < ((mp->ml_address + mp->ml_size) >> PAGESHIFT))
777 return (bitnum + pfn - (mp->ml_address >> PAGESHIFT));
778 bitnum += mp->ml_size >> PAGESHIFT;
779 }
780 return ((pgcnt_t)-1);
781 }
782
783 /*
784 * Set/test bitmap for a CBUF_MAPSIZE range which includes pfn. The
785 * mapping of pfn to range index is imperfect because pfn and bitnum
786 * do not have the same phase. To make sure a CBUF_MAPSIZE range is
787 * covered, call this for both ends:
788 * dump_set_used(base)
789 * dump_set_used(base+CBUF_MAPNP-1)
790 *
791 * This is used during a panic dump to mark pages allocated by
792 * dumpsys_get_maxmem(). The macro IS_DUMP_PAGE(pp) is used by
793 * page_get_mnode_freelist() to make sure pages used by dump are never
794 * allocated.
795 */
796 #define CBUF_MAPP2R(pfn) ((pfn) >> (CBUF_MAPSHIFT - PAGESHIFT))
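
/*
 * With 4KB pages (PAGESHIFT == 12), CBUF_MAPP2R(pfn) is pfn >> 10,
 * so each rbitmap bit covers a 1024-page (4MB) range.
 */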
797
798 static void
799 dump_set_used(pfn_t pfn)
800 {
801
802 pgcnt_t bitnum, rbitnum;
803
804 bitnum = dump_pfn_to_bitnum(pfn);
805 ASSERT(bitnum != (pgcnt_t)-1);
806
807 rbitnum = CBUF_MAPP2R(bitnum);
808 ASSERT(rbitnum < dumpcfg.rbitmapsize);
809
810 BT_SET(dumpcfg.rbitmap, rbitnum);
811 }
812
813 int
814 dump_test_used(pfn_t pfn)
815 {
816 pgcnt_t bitnum, rbitnum;
817
818 bitnum = dump_pfn_to_bitnum(pfn);
819 ASSERT(bitnum != (pgcnt_t)-1);
820
821 rbitnum = CBUF_MAPP2R(bitnum);
822 ASSERT(rbitnum < dumpcfg.rbitmapsize);
823
824 return (BT_TEST(dumpcfg.rbitmap, rbitnum));
825 }
826
827 /*
828 * dumpbzalloc and dumpbzfree are callbacks from the bzip2 library.
829 * dumpsys_get_maxmem() uses them for BZ2_bzCompressInit().
830 */
831 static void *
832 dumpbzalloc(void *opaque, int items, int size)
833 {
834 size_t *sz;
835 char *ret;
836
837 ASSERT(opaque != NULL);
838 sz = opaque;
839 ret = dumpcfg.maxvm + *sz;
840 *sz += items * size;
841 *sz = P2ROUNDUP(*sz, BZ2_BZALLOC_ALIGN);
842 ASSERT(*sz <= dumpcfg.maxvmsize);
843 return (ret);
844 }
845
846 /*ARGSUSED*/
847 static void
848 dumpbzfree(void *opaque, void *addr)
849 {
850 }
851
852 /*
853 * Perform additional checks on the page to see if we can really use
854 * it. The kernel (kas) pages are always set in the bitmap. However,
855 * boot memory pages (prom_ppages or P_BOOTPAGES) are not in the
856 * bitmap. So we check for them.
857 */
858 static inline int
859 dump_pfn_check(pfn_t pfn)
860 {
861 page_t *pp = page_numtopp_nolock(pfn);
862 if (pp == NULL || pp->p_pagenum != pfn ||
863 #if defined(__sparc)
864 pp->p_vnode == &promvp ||
865 #else
866 PP_ISBOOTPAGES(pp) ||
867 #endif
868 pp->p_toxic != 0)
869 return (0);
870 return (1);
871 }
872
873 /*
874 * Check a range to see if all contained pages are available and
875 * return non-zero if the range can be used.
876 */
877 static inline int
878 dump_range_check(pgcnt_t start, pgcnt_t end, pfn_t pfn)
879 {
880 for (; start < end; start++, pfn++) {
881 if (BT_TEST(dumpcfg.bitmap, start))
882 return (0);
883 if (!dump_pfn_check(pfn))
884 return (0);
885 }
886 return (1);
887 }
888
889 /*
890 * dumpsys_get_maxmem() is called during panic. Find unused ranges
891 * and use them for buffers. If we find enough memory switch to
892 * parallel bzip2, otherwise use parallel lzjb.
893 *
894 * It searches the dump bitmap in 2 passes. The first time it looks
895 * for CBUF_MAPSIZE ranges. On the second pass it uses small pages.
896 */
897 static void
898 dumpsys_get_maxmem()
899 {
900 dumpcfg_t *cfg = &dumpcfg;
901 cbuf_t *endcp = &cfg->cbuf[cfg->ncbuf];
902 helper_t *endhp = &cfg->helper[cfg->nhelper];
903 pgcnt_t bitnum, end;
904 size_t sz, endsz, bz2size;
905 pfn_t pfn, off;
906 cbuf_t *cp;
907 helper_t *hp, *ohp;
908 dumpmlw_t mlw;
909 int k;
910
911 /*
912 * Setting dump_plat_mincpu to 0 at any time forces a serial
913 * dump.
914 */
915 if (dump_plat_mincpu == 0) {
916 cfg->clevel = 0;
917 return;
918 }
919
920 /*
921 	 * There may be no point in looking for spare memory. If we are
922 	 * dumping all memory, then none is spare. If we are doing a serial
923 	 * dump, then we already have buffers.
924 */
925 if (cfg->maxsize == 0 || cfg->clevel < DUMP_CLEVEL_LZJB ||
926 (dump_conflags & DUMP_ALL) != 0) {
927 if (cfg->clevel > DUMP_CLEVEL_LZJB)
928 cfg->clevel = DUMP_CLEVEL_LZJB;
929 return;
930 }
931
932 sz = 0;
933 cfg->found4m = 0;
934 cfg->foundsm = 0;
935
936 /* bitmap of ranges used to estimate which pfns are being used */
937 bzero(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.rbitmapsize));
938
939 /* find ranges that are not being dumped to use for buffers */
940 dump_init_memlist_walker(&mlw);
941 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
942 dump_timeleft = dump_timeout;
943 end = bitnum + CBUF_MAPNP;
944 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
945 ASSERT(pfn != PFN_INVALID);
946
947 /* skip partial range at end of mem segment */
948 if (mlw.mpleft < CBUF_MAPNP) {
949 end = bitnum + mlw.mpleft;
950 continue;
951 }
952
953 		/* skip non-aligned pages */
954 off = P2PHASE(pfn, CBUF_MAPNP);
955 if (off != 0) {
956 end -= off;
957 continue;
958 }
959
960 if (!dump_range_check(bitnum, end, pfn))
961 continue;
962
963 ASSERT((sz + CBUF_MAPSIZE) <= cfg->maxvmsize);
964 hat_devload(kas.a_hat, cfg->maxvm + sz, CBUF_MAPSIZE, pfn,
965 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
966 sz += CBUF_MAPSIZE;
967 cfg->found4m++;
968
969 /* set the bitmap for both ends to be sure to cover the range */
970 dump_set_used(pfn);
971 dump_set_used(pfn + CBUF_MAPNP - 1);
972
973 if (sz >= cfg->maxsize)
974 goto foundmax;
975 }
976
977 /* Add small pages if we can't find enough large pages. */
978 dump_init_memlist_walker(&mlw);
979 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
980 dump_timeleft = dump_timeout;
981 end = bitnum + CBUF_MAPNP;
982 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
983 ASSERT(pfn != PFN_INVALID);
984
985 /* Find any non-aligned pages at start and end of segment. */
986 off = P2PHASE(pfn, CBUF_MAPNP);
987 if (mlw.mpleft < CBUF_MAPNP) {
988 end = bitnum + mlw.mpleft;
989 } else if (off != 0) {
990 end -= off;
991 } else if (cfg->found4m && dump_test_used(pfn)) {
992 continue;
993 }
994
995 for (; bitnum < end; bitnum++, pfn++) {
996 dump_timeleft = dump_timeout;
997 if (BT_TEST(dumpcfg.bitmap, bitnum))
998 continue;
999 if (!dump_pfn_check(pfn))
1000 continue;
1001 ASSERT((sz + PAGESIZE) <= cfg->maxvmsize);
1002 hat_devload(kas.a_hat, cfg->maxvm + sz, PAGESIZE, pfn,
1003 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
1004 sz += PAGESIZE;
1005 cfg->foundsm++;
1006 dump_set_used(pfn);
1007 if (sz >= cfg->maxsize)
1008 goto foundmax;
1009 }
1010 }
1011
1012 /* Fall back to lzjb if we did not get enough memory for bzip2. */
1013 endsz = (cfg->maxsize * cfg->threshold) / cfg->nhelper;
1014 if (sz < endsz) {
1015 cfg->clevel = DUMP_CLEVEL_LZJB;
1016 }
1017
1018 /* Allocate memory for as many helpers as we can. */
1019 foundmax:
1020
1021 /* Byte offsets into memory found and mapped above */
1022 endsz = sz;
1023 sz = 0;
1024
1025 /* Set the size for bzip2 state. Only bzip2 needs it. */
1026 bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
1027
1028 	/* Skip the preallocated output buffers. */
1029 cp = &cfg->cbuf[MINCBUFS];
1030
1031 /* Use this to move memory up from the preallocated helpers. */
1032 ohp = cfg->helper;
1033
1034 /* Loop over all helpers and allocate memory. */
1035 for (hp = cfg->helper; hp < endhp; hp++) {
1036
1037 /* Skip preallocated helpers by checking hp->page. */
1038 if (hp->page == NULL) {
1039 if (cfg->clevel <= DUMP_CLEVEL_LZJB) {
1040 /* lzjb needs 2 1-page buffers */
1041 if ((sz + (2 * PAGESIZE)) > endsz)
1042 break;
1043 hp->page = cfg->maxvm + sz;
1044 sz += PAGESIZE;
1045 hp->lzbuf = cfg->maxvm + sz;
1046 sz += PAGESIZE;
1047
1048 } else if (ohp->lzbuf != NULL) {
1049 				/* re-use the preallocated lzjb page for bzip2 */
1050 hp->page = ohp->lzbuf;
1051 ohp->lzbuf = NULL;
1052 ++ohp;
1053
1054 } else {
1055 /* bzip2 needs a 1-page buffer */
1056 if ((sz + PAGESIZE) > endsz)
1057 break;
1058 hp->page = cfg->maxvm + sz;
1059 sz += PAGESIZE;
1060 }
1061 }
1062
1063 /*
1064 * Add output buffers per helper. The number of
1065 * buffers per helper is determined by the ratio of
1066 * ncbuf to nhelper.
1067 */
1068 for (k = 0; cp < endcp && (sz + CBUF_SIZE) <= endsz &&
1069 k < NCBUF_PER_HELPER; k++) {
1070 cp->state = CBUF_FREEBUF;
1071 cp->size = CBUF_SIZE;
1072 cp->buf = cfg->maxvm + sz;
1073 sz += CBUF_SIZE;
1074 ++cp;
1075 }
1076
1077 /*
1078 * bzip2 needs compression state. Use the dumpbzalloc
1079 * and dumpbzfree callbacks to allocate the memory.
1080 * bzip2 does allocation only at init time.
1081 */
1082 if (cfg->clevel >= DUMP_CLEVEL_BZIP2) {
1083 if ((sz + bz2size) > endsz) {
1084 hp->page = NULL;
1085 break;
1086 } else {
1087 hp->bzstream.opaque = &sz;
1088 hp->bzstream.bzalloc = dumpbzalloc;
1089 hp->bzstream.bzfree = dumpbzfree;
1090 (void) BZ2_bzCompressInit(&hp->bzstream,
1091 dump_bzip2_level, 0, 0);
1092 hp->bzstream.opaque = NULL;
1093 }
1094 }
1095 }
1096
1097 /* Finish allocating output buffers */
1098 for (; cp < endcp && (sz + CBUF_SIZE) <= endsz; cp++) {
1099 cp->state = CBUF_FREEBUF;
1100 cp->size = CBUF_SIZE;
1101 cp->buf = cfg->maxvm + sz;
1102 sz += CBUF_SIZE;
1103 }
1104
1105 /* Enable IS_DUMP_PAGE macro, which checks for pages we took. */
1106 if (cfg->found4m || cfg->foundsm)
1107 dump_check_used = 1;
1108
1109 ASSERT(sz <= endsz);
1110 }
1111
1112 static void
1113 dumphdr_init(void)
1114 {
1115 pgcnt_t npages = 0;
1116
1117 ASSERT(MUTEX_HELD(&dump_lock));
1118
1119 if (dumphdr == NULL) {
1120 dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP);
1121 dumphdr->dump_magic = DUMP_MAGIC;
1122 dumphdr->dump_version = DUMP_VERSION;
1123 dumphdr->dump_wordsize = DUMP_WORDSIZE;
1124 dumphdr->dump_pageshift = PAGESHIFT;
1125 dumphdr->dump_pagesize = PAGESIZE;
1126 dumphdr->dump_utsname = utsname;
1127 (void) strcpy(dumphdr->dump_platform, platform);
1128 dumpbuf.size = dumpbuf_iosize(maxphys);
1129 dumpbuf.start = kmem_alloc(dumpbuf.size, KM_SLEEP);
1130 dumpbuf.end = dumpbuf.start + dumpbuf.size;
1131 dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP);
1132 dumpcfg.helpermap = kmem_zalloc(BT_SIZEOFMAP(NCPU), KM_SLEEP);
1133 LOCK_INIT_HELD(&dumpcfg.helper_lock);
1134 dump_stack_scratch = kmem_alloc(STACK_BUF_SIZE, KM_SLEEP);
1135 (void) strncpy(dumphdr->dump_uuid, dump_get_uuid(),
1136 sizeof (dumphdr->dump_uuid));
1137 }
1138
1139 npages = num_phys_pages();
1140
1141 if (dumpcfg.bitmapsize != npages) {
1142 size_t rlen = CBUF_MAPP2R(P2ROUNDUP(npages, CBUF_MAPNP));
1143 void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP);
1144 void *rmap = kmem_alloc(BT_SIZEOFMAP(rlen), KM_SLEEP);
1145
1146 if (dumpcfg.bitmap != NULL)
1147 kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.
1148 bitmapsize));
1149 if (dumpcfg.rbitmap != NULL)
1150 kmem_free(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.
1151 rbitmapsize));
1152 dumpcfg.bitmap = map;
1153 dumpcfg.bitmapsize = npages;
1154 dumpcfg.rbitmap = rmap;
1155 dumpcfg.rbitmapsize = rlen;
1156 }
1157 }
1158
1159 /*
1160 * Establish a new dump device.
1161 */
1162 int
1163 dumpinit(vnode_t *vp, char *name, int justchecking)
1164 {
1165 vnode_t *cvp;
1166 vattr_t vattr;
1167 vnode_t *cdev_vp;
1168 int error = 0;
1169
1170 ASSERT(MUTEX_HELD(&dump_lock));
1171
1172 dumphdr_init();
1173
1174 cvp = common_specvp(vp);
1175 if (cvp == dumpvp)
1176 return (0);
1177
1178 /*
1179 * Determine whether this is a plausible dump device. We want either:
1180 * (1) a real device that's not mounted and has a cb_dump routine, or
1181 * (2) a swapfile on some filesystem that has a vop_dump routine.
1182 */
1183 if ((error = VOP_OPEN(&cvp, FREAD | FWRITE, kcred, NULL)) != 0)
1184 return (error);
1185
1186 vattr.va_mask = AT_SIZE | AT_TYPE | AT_RDEV;
1187 if ((error = VOP_GETATTR(cvp, &vattr, 0, kcred, NULL)) == 0) {
1188 if (vattr.va_type == VBLK || vattr.va_type == VCHR) {
1189 if (devopsp[getmajor(vattr.va_rdev)]->
1190 devo_cb_ops->cb_dump == nodev)
1191 error = ENOTSUP;
1192 else if (vfs_devismounted(vattr.va_rdev))
1193 error = EBUSY;
1194 if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip),
1195 ZFS_DRIVER) == 0 &&
1196 IS_SWAPVP(common_specvp(cvp)))
1197 error = EBUSY;
1198 } else {
1199 if (vn_matchopval(cvp, VOPNAME_DUMP, fs_nosys) ||
1200 !IS_SWAPVP(cvp))
1201 error = ENOTSUP;
1202 }
1203 }
1204
1205 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE)
1206 error = ENOSPC;
1207
1208 if (error || justchecking) {
1209 (void) VOP_CLOSE(cvp, FREAD | FWRITE, 1, (offset_t)0,
1210 kcred, NULL);
1211 return (error);
1212 }
1213
1214 VN_HOLD(cvp);
1215
1216 if (dumpvp != NULL)
1217 dumpfini(); /* unconfigure the old dump device */
1218
1219 dumpvp = cvp;
1220 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
1221 dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP);
1222 (void) strcpy(dumppath, name);
1223 dumpbuf.iosize = 0;
1224
1225 /*
1226 * If the dump device is a block device, attempt to open up the
1227 * corresponding character device and determine its maximum transfer
1228 * size. We use this information to potentially resize dumpbuf to a
1229  * larger and more optimal size for performing I/O to the dump device.
1230 */
1231 if (cvp->v_type == VBLK &&
1232 (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) {
1233 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1234 size_t blk_size;
1235 struct dk_cinfo dki;
1236 struct dk_minfo minf;
1237
1238 if (VOP_IOCTL(cdev_vp, DKIOCGMEDIAINFO,
1239 (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL)
1240 == 0 && minf.dki_lbsize != 0)
1241 blk_size = minf.dki_lbsize;
1242 else
1243 blk_size = DEV_BSIZE;
1244
1245 if (VOP_IOCTL(cdev_vp, DKIOCINFO, (intptr_t)&dki,
1246 FKIOCTL, kcred, NULL, NULL) == 0) {
1247 dumpbuf.iosize = dki.dki_maxtransfer * blk_size;
1248 dumpbuf_resize();
1249 }
1250 /*
1251 * If we are working with a zvol then dumpify it
1252 * if it's not being used as swap.
1253 */
1254 if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) {
1255 if (IS_SWAPVP(common_specvp(cvp)))
1256 error = EBUSY;
1257 else if ((error = VOP_IOCTL(cdev_vp,
1258 DKIOCDUMPINIT, NULL, FKIOCTL, kcred,
1259 NULL, NULL)) != 0)
1260 dumpfini();
1261 }
1262
1263 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1264 kcred, NULL);
1265 }
1266
1267 VN_RELE(cdev_vp);
1268 }
1269
1270 cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20);
1271
1272 dump_update_clevel();
1273
1274 return (error);
1275 }
1276
1277 void
1278 dumpfini(void)
1279 {
1280 vattr_t vattr;
1281 boolean_t is_zfs = B_FALSE;
1282 vnode_t *cdev_vp;
1283 ASSERT(MUTEX_HELD(&dump_lock));
1284
1285 kmem_free(dumppath, strlen(dumppath) + 1);
1286
1287 /*
1288 * Determine if we are using zvols for our dump device
1289 */
1290 vattr.va_mask = AT_RDEV;
1291 if (VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL) == 0) {
1292 is_zfs = (getmajor(vattr.va_rdev) ==
1293 ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE;
1294 }
1295
1296 /*
1297 * If we have a zvol dump device then we call into zfs so
1298 * that it may have a chance to cleanup.
1299 */
1300 if (is_zfs &&
1301 (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) {
1302 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1303 (void) VOP_IOCTL(cdev_vp, DKIOCDUMPFINI, NULL, FKIOCTL,
1304 kcred, NULL, NULL);
1305 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1306 kcred, NULL);
1307 }
1308 VN_RELE(cdev_vp);
1309 }
1310
1311 (void) VOP_CLOSE(dumpvp, FREAD | FWRITE, 1, (offset_t)0, kcred, NULL);
1312
1313 VN_RELE(dumpvp);
1314
1315 dumpvp = NULL;
1316 dumpvp_size = 0;
1317 dumppath = NULL;
1318 }
1319
1320 static offset_t
1321 dumpvp_flush(void)
1322 {
1323 size_t size = P2ROUNDUP(dumpbuf.cur - dumpbuf.start, PAGESIZE);
1324 hrtime_t iotime;
1325 int err;
1326
1327 if (dumpbuf.vp_off + size > dumpbuf.vp_limit) {
1328 dump_ioerr = ENOSPC;
1329 dumpbuf.vp_off = dumpbuf.vp_limit;
1330 } else if (size != 0) {
1331 iotime = gethrtime();
1332 dumpsync.iowait += iotime - dumpsync.iowaitts;
1333 if (panicstr)
1334 err = VOP_DUMP(dumpvp, dumpbuf.start,
1335 lbtodb(dumpbuf.vp_off), btod(size), NULL);
1336 else
1337 err = vn_rdwr(UIO_WRITE, dumpbuf.cdev_vp != NULL ?
1338 dumpbuf.cdev_vp : dumpvp, dumpbuf.start, size,
1339 dumpbuf.vp_off, UIO_SYSSPACE, 0, dumpbuf.vp_limit,
1340 kcred, 0);
1341 if (err && dump_ioerr == 0)
1342 dump_ioerr = err;
1343 dumpsync.iowaitts = gethrtime();
1344 dumpsync.iotime += dumpsync.iowaitts - iotime;
1345 dumpsync.nwrite += size;
1346 dumpbuf.vp_off += size;
1347 }
1348 dumpbuf.cur = dumpbuf.start;
1349 dump_timeleft = dump_timeout;
1350 return (dumpbuf.vp_off);
1351 }
1352
1353 /* maximize write speed by keeping seek offset aligned with size */
1354 void
1355 dumpvp_write(const void *va, size_t size)
1356 {
1357 size_t len, off, sz;
1358
1359 while (size != 0) {
1360 len = MIN(size, dumpbuf.end - dumpbuf.cur);
1361 if (len == 0) {
1362 off = P2PHASE(dumpbuf.vp_off, dumpbuf.size);
1363 if (off == 0 || !ISP2(dumpbuf.size)) {
1364 (void) dumpvp_flush();
1365 } else {
1366 sz = dumpbuf.size - off;
1367 dumpbuf.cur = dumpbuf.start + sz;
1368 (void) dumpvp_flush();
1369 ovbcopy(dumpbuf.start + sz, dumpbuf.start, off);
1370 dumpbuf.cur += off;
1371 }
1372 } else {
1373 bcopy(va, dumpbuf.cur, len);
1374 va = (char *)va + len;
1375 dumpbuf.cur += len;
1376 size -= len;
1377 }
1378 }
1379 }
1380
1381 /*ARGSUSED*/
1382 static void
1383 dumpvp_ksyms_write(const void *src, void *dst, size_t size)
1384 {
1385 dumpvp_write(src, size);
1386 }
1387
1388 /*
1389 * Mark 'pfn' in the bitmap and dump its translation table entry.
1390 */
1391 void
1392 dump_addpage(struct as *as, void *va, pfn_t pfn)
1393 {
1394 mem_vtop_t mem_vtop;
1395 pgcnt_t bitnum;
1396
1397 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1398 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1399 dumphdr->dump_npages++;
1400 BT_SET(dumpcfg.bitmap, bitnum);
1401 }
1402 dumphdr->dump_nvtop++;
1403 mem_vtop.m_as = as;
1404 mem_vtop.m_va = va;
1405 mem_vtop.m_pfn = pfn;
1406 dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
1407 }
1408 dump_timeleft = dump_timeout;
1409 }
1410
1411 /*
1412 * Mark 'pfn' in the bitmap
1413 */
1414 void
1415 dump_page(pfn_t pfn)
1416 {
1417 pgcnt_t bitnum;
1418
1419 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1420 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1421 dumphdr->dump_npages++;
1422 BT_SET(dumpcfg.bitmap, bitnum);
1423 }
1424 }
1425 dump_timeleft = dump_timeout;
1426 }
1427
1428 /*
1429 * Dump the <as, va, pfn> information for a given address space.
1430 * SEGOP_DUMP() will call dump_addpage() for each page in the segment.
1431 */
1432 static void
1433 dump_as(struct as *as)
1434 {
1435 struct seg *seg;
1436
1437 AS_LOCK_ENTER(as, RW_READER);
1438 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
1439 if (seg->s_as != as)
1440 break;
1441 if (seg->s_ops == NULL)
1442 continue;
1443 SEGOP_DUMP(seg);
1444 }
1445 AS_LOCK_EXIT(as);
1446
1447 if (seg != NULL)
1448 cmn_err(CE_WARN, "invalid segment %p in address space %p",
1449 (void *)seg, (void *)as);
1450 }
1451
1452 static int
1453 dump_process(pid_t pid)
1454 {
1455 proc_t *p = sprlock(pid);
1456
1457 if (p == NULL)
1458 return (-1);
1459 if (p->p_as != &kas) {
1460 mutex_exit(&p->p_lock);
1461 dump_as(p->p_as);
1462 mutex_enter(&p->p_lock);
1463 }
1464
1465 sprunlock(p);
1466
1467 return (0);
1468 }
1469
1470 /*
1471 * The following functions (dump_summary(), dump_ereports(), and
1472 * dump_messages()), write data to an uncompressed area within the
1473 * crashdump. The layout of these is
1474 *
1475 * +------------------------------------------------------------+
1476 * | compressed pages | summary | ereports | messages |
1477 * +------------------------------------------------------------+
1478 *
1479 * With the advent of saving a compressed crash dump by default, we
1480 * need to save a little more data to describe the failure mode in
1481 * an uncompressed buffer available before savecore uncompresses
1482 * the dump. Initially this is a copy of the stack trace. Additional
1483 * summary information should be added here.
1484 */
1485
1486 void
1487 dump_summary(void)
1488 {
1489 u_offset_t dumpvp_start;
1490 summary_dump_t sd;
1491
1492 if (dumpvp == NULL || dumphdr == NULL)
1493 return;
1494
1495 dumpbuf.cur = dumpbuf.start;
1496
1497 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE +
1498 DUMP_ERPTSIZE);
1499 dumpvp_start = dumpbuf.vp_limit - DUMP_SUMMARYSIZE;
1500 dumpbuf.vp_off = dumpvp_start;
1501
1502 sd.sd_magic = SUMMARY_MAGIC;
1503 sd.sd_ssum = checksum32(dump_stack_scratch, STACK_BUF_SIZE);
1504 dumpvp_write(&sd, sizeof (sd));
1505 dumpvp_write(dump_stack_scratch, STACK_BUF_SIZE);
1506
1507 sd.sd_magic = 0; /* indicate end of summary */
1508 dumpvp_write(&sd, sizeof (sd));
1509 (void) dumpvp_flush();
1510 }
1511
1512 void
1513 dump_ereports(void)
1514 {
1515 u_offset_t dumpvp_start;
1516 erpt_dump_t ed;
1517
1518 if (dumpvp == NULL || dumphdr == NULL)
1519 return;
1520
1521 dumpbuf.cur = dumpbuf.start;
1522 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE);
1523 dumpvp_start = dumpbuf.vp_limit - DUMP_ERPTSIZE;
1524 dumpbuf.vp_off = dumpvp_start;
1525
1526 fm_ereport_dump();
1527 if (panicstr)
1528 errorq_dump();
1529
1530 bzero(&ed, sizeof (ed)); /* indicate end of ereports */
1531 dumpvp_write(&ed, sizeof (ed));
1532 (void) dumpvp_flush();
1533
1534 if (!panicstr) {
1535 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1536 (size_t)(dumpbuf.vp_off - dumpvp_start),
1537 B_INVAL | B_FORCE, kcred, NULL);
1538 }
1539 }
1540
1541 void
1542 dump_messages(void)
1543 {
1544 log_dump_t ld;
1545 mblk_t *mctl, *mdata;
1546 queue_t *q, *qlast;
1547 u_offset_t dumpvp_start;
1548
1549 if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL)
1550 return;
1551
1552 dumpbuf.cur = dumpbuf.start;
1553 dumpbuf.vp_limit = dumpvp_size - DUMP_OFFSET;
1554 dumpvp_start = dumpbuf.vp_limit - DUMP_LOGSIZE;
1555 dumpbuf.vp_off = dumpvp_start;
1556
1557 qlast = NULL;
1558 do {
1559 for (q = log_consq; q->q_next != qlast; q = q->q_next)
1560 continue;
1561 for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) {
1562 dump_timeleft = dump_timeout;
1563 mdata = mctl->b_cont;
1564 ld.ld_magic = LOG_MAGIC;
1565 ld.ld_msgsize = MBLKL(mctl->b_cont);
1566 ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl));
1567 ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata));
1568 dumpvp_write(&ld, sizeof (ld));
1569 dumpvp_write(mctl->b_rptr, MBLKL(mctl));
1570 dumpvp_write(mdata->b_rptr, MBLKL(mdata));
1571 }
1572 } while ((qlast = q) != log_consq);
1573
1574 ld.ld_magic = 0; /* indicate end of messages */
1575 dumpvp_write(&ld, sizeof (ld));
1576 (void) dumpvp_flush();
1577 if (!panicstr) {
1578 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1579 (size_t)(dumpbuf.vp_off - dumpvp_start),
1580 B_INVAL | B_FORCE, kcred, NULL);
1581 }
1582 }
1583
1584 /*
1585 * The following functions are called on multiple CPUs during dump.
1586 * They must not use most kernel services, because all cross-calls are
1587 * disabled during panic. Therefore, blocking locks and cache flushes
1588 * will not work.
1589 */
1590
1591 /*
1592 * Copy pages, trapping ECC errors. Also, for robustness, trap data
1593 * access in case something goes wrong in the hat layer and the
1594 * mapping is broken.
1595 */
1596 static int
1597 dump_pagecopy(void *src, void *dst)
1598 {
1599 long *wsrc = (long *)src;
1600 long *wdst = (long *)dst;
1601 const ulong_t ncopies = PAGESIZE / sizeof (long);
1602 volatile int w = 0;
1603 volatile int ueoff = -1;
1604 on_trap_data_t otd;
1605
1606 if (on_trap(&otd, OT_DATA_EC | OT_DATA_ACCESS)) {
1607 if (ueoff == -1)
1608 ueoff = w * sizeof (long);
1609 /* report "bad ECC" or "bad address" */
1610 #ifdef _LP64
1611 if (otd.ot_trap & OT_DATA_EC)
1612 wdst[w++] = 0x00badecc00badecc;
1613 else
1614 wdst[w++] = 0x00badadd00badadd;
1615 #else
1616 if (otd.ot_trap & OT_DATA_EC)
1617 wdst[w++] = 0x00badecc;
1618 else
1619 wdst[w++] = 0x00badadd;
1620 #endif
1621 }
1622 while (w < ncopies) {
1623 wdst[w] = wsrc[w];
1624 w++;
1625 }
1626 no_trap();
1627 return (ueoff);
1628 }
1629
1630 static void
1631 dumpsys_close_cq(cqueue_t *cq, int live)
1632 {
1633 if (live) {
1634 mutex_enter(&cq->mutex);
1635 atomic_dec_uint(&cq->open);
1636 cv_signal(&cq->cv);
1637 mutex_exit(&cq->mutex);
1638 } else {
1639 atomic_dec_uint(&cq->open);
1640 }
1641 }
1642
1643 static inline void
1644 dumpsys_spinlock(lock_t *lp)
1645 {
1646 uint_t backoff = 0;
1647 int loop_count = 0;
1648
1649 while (LOCK_HELD(lp) || !lock_spin_try(lp)) {
1650 if (++loop_count >= ncpus) {
1651 backoff = mutex_lock_backoff(0);
1652 loop_count = 0;
1653 } else {
1654 backoff = mutex_lock_backoff(backoff);
1655 }
1656 mutex_lock_delay(backoff);
1657 }
1658 }
1659
1660 static inline void
1661 dumpsys_spinunlock(lock_t *lp)
1662 {
1663 lock_clear(lp);
1664 }
1665
1666 static inline void
1667 dumpsys_lock(cqueue_t *cq, int live)
1668 {
1669 if (live)
1670 mutex_enter(&cq->mutex);
1671 else
1672 dumpsys_spinlock(&cq->spinlock);
1673 }
1674
1675 static inline void
1676 dumpsys_unlock(cqueue_t *cq, int live, int signal)
1677 {
1678 if (live) {
1679 if (signal)
1680 cv_signal(&cq->cv);
1681 mutex_exit(&cq->mutex);
1682 } else {
1683 dumpsys_spinunlock(&cq->spinlock);
1684 }
1685 }
1686
1687 static void
1688 dumpsys_wait_cq(cqueue_t *cq, int live)
1689 {
1690 if (live) {
1691 cv_wait(&cq->cv, &cq->mutex);
1692 } else {
1693 dumpsys_spinunlock(&cq->spinlock);
1694 while (cq->open)
1695 if (cq->first)
1696 break;
1697 dumpsys_spinlock(&cq->spinlock);
1698 }
1699 }
1700
1701 static void
1702 dumpsys_put_cq(cqueue_t *cq, cbuf_t *cp, int newstate, int live)
1703 {
1704 if (cp == NULL)
1705 return;
1706
1707 dumpsys_lock(cq, live);
1708
1709 if (cq->ts != 0) {
1710 cq->empty += gethrtime() - cq->ts;
1711 cq->ts = 0;
1712 }
1713
1714 cp->state = newstate;
1715 cp->next = NULL;
1716 if (cq->last == NULL)
1717 cq->first = cp;
1718 else
1719 cq->last->next = cp;
1720 cq->last = cp;
1721
1722 dumpsys_unlock(cq, live, 1);
1723 }
1724
1725 static cbuf_t *
1726 dumpsys_get_cq(cqueue_t *cq, int live)
1727 {
1728 cbuf_t *cp;
1729 hrtime_t now = gethrtime();
1730
1731 dumpsys_lock(cq, live);
1732
1733 /* CONSTCOND */
1734 while (1) {
1735 cp = (cbuf_t *)cq->first;
1736 if (cp == NULL) {
1737 if (cq->open == 0)
1738 break;
1739 dumpsys_wait_cq(cq, live);
1740 continue;
1741 }
1742 cq->first = cp->next;
1743 if (cq->first == NULL) {
1744 cq->last = NULL;
1745 cq->ts = now;
1746 }
1747 break;
1748 }
1749
1750 dumpsys_unlock(cq, live, cq->first != NULL || cq->open == 0);
1751 return (cp);
1752 }
1753
1754 /*
1755 * Send an error message to the console. If the main task is running
1756 * just write the message via uprintf. If a helper is running the
1757 * message has to be put on a queue for the main task. Setting fmt to
1758 * NULL means flush the error message buffer. If fmt is not NULL, just
1759 * add the text to the existing buffer.
1760 */
1761 static void
1762 dumpsys_errmsg(helper_t *hp, const char *fmt, ...)
1763 {
1764 dumpsync_t *ds = hp->ds;
1765 cbuf_t *cp = hp->cperr;
1766 va_list adx;
1767
1768 if (hp->helper == MAINHELPER) {
1769 if (fmt != NULL) {
1770 if (ds->neednl) {
1771 uprintf("\n");
1772 ds->neednl = 0;
1773 }
1774 va_start(adx, fmt);
1775 vuprintf(fmt, adx);
1776 va_end(adx);
1777 }
1778 } else if (fmt == NULL) {
1779 if (cp != NULL) {
1780 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1781 hp->cperr = NULL;
1782 }
1783 } else {
1784 if (hp->cperr == NULL) {
1785 cp = CQ_GET(freebufq);
1786 hp->cperr = cp;
1787 cp->used = 0;
1788 }
1789 va_start(adx, fmt);
1790 cp->used += vsnprintf(cp->buf + cp->used, cp->size - cp->used,
1791 fmt, adx);
1792 va_end(adx);
1793 if ((cp->used + LOG_MSGSIZE) > cp->size) {
1794 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1795 hp->cperr = NULL;
1796 }
1797 }
1798 }
1799
1800 /*
1801 * Write an output buffer to the dump file. If the main task is
1802 * running just write the data. If a helper is running the output is
1803 * placed on a queue for the main task.
1804 */
1805 static void
1806 dumpsys_swrite(helper_t *hp, cbuf_t *cp, size_t used)
1807 {
1808 dumpsync_t *ds = hp->ds;
1809
1810 if (hp->helper == MAINHELPER) {
1811 HRSTART(ds->perpage, write);
1812 dumpvp_write(cp->buf, used);
1813 HRSTOP(ds->perpage, write);
1814 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
1815 } else {
1816 cp->used = used;
1817 CQ_PUT(mainq, cp, CBUF_WRITE);
1818 }
1819 }
1820
1821 /*
1822 * Copy one page within the mapped range. The offset starts at 0 and
1823 * is relative to the first pfn. cp->buf + cp->off is the address of
1824 * the first pfn. If dump_pagecopy returns a UE offset, create an
1825 * error message. Returns the offset to the next pfn in the range
1826 * selected by the bitmap.
1827 */
1828 static int
1829 dumpsys_copy_page(helper_t *hp, int offset)
1830 {
1831 cbuf_t *cp = hp->cpin;
1832 int ueoff;
1833
1834 ASSERT(cp->off + offset + PAGESIZE <= cp->size);
1835 ASSERT(BT_TEST(dumpcfg.bitmap, cp->bitnum));
1836
1837 ueoff = dump_pagecopy(cp->buf + cp->off + offset, hp->page);
1838
1839 /* ueoff is the offset in the page to a UE error */
1840 if (ueoff != -1) {
1841 uint64_t pa = ptob(cp->pfn) + offset + ueoff;
1842
1843 dumpsys_errmsg(hp, "cpu %d: memory error at PA 0x%08x.%08x\n",
1844 CPU->cpu_id, (uint32_t)(pa >> 32), (uint32_t)pa);
1845 }
1846
1847 /*
1848 * Advance bitnum and offset to the next input page for the
1849 * next call to this function.
1850 */
1851 offset += PAGESIZE;
1852 cp->bitnum++;
1853 while (cp->off + offset < cp->size) {
1854 if (BT_TEST(dumpcfg.bitmap, cp->bitnum))
1855 break;
1856 offset += PAGESIZE;
1857 cp->bitnum++;
1858 }
1859
1860 return (offset);
1861 }
1862
1863 /*
1864 * Read the helper queue, and copy one mapped page. Return 0 when
1865 * done. Return 1 when a page has been copied into hp->page.
1866 */
1867 static int
1868 dumpsys_sread(helper_t *hp)
1869 {
1870 dumpsync_t *ds = hp->ds;
1871
1872 /* CONSTCOND */
1873 while (1) {
1874
1875 /* Find the next input buffer. */
1876 if (hp->cpin == NULL) {
1877 HRSTART(hp->perpage, inwait);
1878
1879 /* CONSTCOND */
1880 while (1) {
1881 hp->cpin = CQ_GET(helperq);
1882 dump_timeleft = dump_timeout;
1883
1884 /*
1885 * NULL return means the helper queue
1886 * is closed and empty.
1887 */
1888 if (hp->cpin == NULL)
1889 break;
1890
1891 /* Have input, check for dump I/O error. */
1892 if (!dump_ioerr)
1893 break;
1894
1895 /*
1896 * If an I/O error occurs, stay in the
1897 * loop in order to empty the helper
1898 * queue. Return each buffer to the
1899 * main task to unmap and free it.
1900 */
1901 hp->cpin->used = 0;
1902 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1903 }
1904 HRSTOP(hp->perpage, inwait);
1905
1906 /* Stop here when the helper queue is closed. */
1907 if (hp->cpin == NULL)
1908 break;
1909
1910 /* Set the offset to 0 to get the first pfn. */
1911 hp->in = 0;
1912
1913 /* Set the total processed to 0 */
1914 hp->used = 0;
1915 }
1916
1917 /* Process the next page. */
1918 if (hp->used < hp->cpin->used) {
1919
1920 /*
1921 * Get the next page from the input buffer and
1922 * return a copy.
1923 */
1924 ASSERT(hp->in != -1);
1925 HRSTART(hp->perpage, copy);
1926 hp->in = dumpsys_copy_page(hp, hp->in);
1927 hp->used += PAGESIZE;
1928 HRSTOP(hp->perpage, copy);
1929 break;
1930
1931 } else {
1932
1933 /*
1934 * Done with the input. Flush the VM and
1935 * return the buffer to the main task.
1936 */
1937 if (panicstr && hp->helper != MAINHELPER)
1938 hat_flush_range(kas.a_hat,
1939 hp->cpin->buf, hp->cpin->size);
1940 dumpsys_errmsg(hp, NULL);
1941 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1942 hp->cpin = NULL;
1943 }
1944 }
1945
1946 return (hp->cpin != NULL);
1947 }
1948
1949 /*
1950 * Compress size bytes starting at buf with bzip2
1951 * mode:
1952 * BZ_RUN add one more compressed page
1953 * BZ_FINISH no more input, flush the state
1954 */
1955 static void
1956 dumpsys_bzrun(helper_t *hp, void *buf, size_t size, int mode)
1957 {
1958 dumpsync_t *ds = hp->ds;
1959 const int CSIZE = sizeof (dumpcsize_t);
1960 bz_stream *ps = &hp->bzstream;
1961 int rc = 0;
1962 uint32_t csize;
1963 dumpcsize_t cs;
1964
1965 /* Set input pointers to new input page */
1966 if (size > 0) {
1967 ps->avail_in = size;
1968 ps->next_in = buf;
1969 }
1970
1971 /* CONSTCOND */
1972 while (1) {
1973
1974 /* Quit when all input has been consumed */
1975 if (ps->avail_in == 0 && mode == BZ_RUN)
1976 break;
1977
1978 /* Get a new output buffer */
1979 if (hp->cpout == NULL) {
1980 HRSTART(hp->perpage, outwait);
1981 hp->cpout = CQ_GET(freebufq);
1982 HRSTOP(hp->perpage, outwait);
1983 ps->avail_out = hp->cpout->size - CSIZE;
1984 ps->next_out = hp->cpout->buf + CSIZE;
1985 }
1986
1987 /* Compress input, or finalize */
1988 HRSTART(hp->perpage, compress);
1989 rc = BZ2_bzCompress(ps, mode);
1990 HRSTOP(hp->perpage, compress);
1991
1992 /* Check for error */
1993 if (mode == BZ_RUN && rc != BZ_RUN_OK) {
1994 dumpsys_errmsg(hp, "%d: BZ_RUN error %s at page %lx\n",
1995 hp->helper, BZ2_bzErrorString(rc),
1996 hp->cpin->pagenum);
1997 break;
1998 }
1999
2000 /* Write the buffer if it is full, or we are flushing */
2001 if (ps->avail_out == 0 || mode == BZ_FINISH) {
2002 csize = hp->cpout->size - CSIZE - ps->avail_out;
2003 cs = DUMP_SET_TAG(csize, hp->tag);
2004 if (csize > 0) {
2005 (void) memcpy(hp->cpout->buf, &cs, CSIZE);
2006 dumpsys_swrite(hp, hp->cpout, csize + CSIZE);
2007 hp->cpout = NULL;
2008 }
2009 }
2010
2011 /* Check for final complete */
2012 if (mode == BZ_FINISH) {
2013 if (rc == BZ_STREAM_END)
2014 break;
2015 if (rc != BZ_FINISH_OK) {
2016 dumpsys_errmsg(hp, "%d: BZ_FINISH error %s\n",
2017 hp->helper, BZ2_bzErrorString(rc));
2018 break;
2019 }
2020 }
2021 }
2022
2023 /* Cleanup state and buffers */
2024 if (mode == BZ_FINISH) {
2025
2026 /* Reset state so that it is re-usable. */
2027 (void) BZ2_bzCompressReset(&hp->bzstream);
2028
2029 /* Give any unused output buffer to the main task */
2030 if (hp->cpout != NULL) {
2031 hp->cpout->used = 0;
2032 CQ_PUT(mainq, hp->cpout, CBUF_ERRMSG);
2033 hp->cpout = NULL;
2034 }
2035 }
2036 }
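
/*
 * Each block that dumpsys_bzrun() emits is laid out as a dumpcsize_t
 * word (written by the memcpy above, packing this helper's stream tag
 * with the compressed length via DUMP_SET_TAG) followed by up to
 * cpout->size - CSIZE bytes of bzip2 output, so that the interleaved
 * per-helper streams can be demultiplexed when the dump is read back.
 */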
2037
2038 static void
2039 dumpsys_bz2compress(helper_t *hp)
2040 {
2041 dumpsync_t *ds = hp->ds;
2042 dumpstreamhdr_t sh;
2043
2044 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2045 sh.stream_pagenum = (pgcnt_t)-1;
2046 sh.stream_npages = 0;
2047 hp->cpin = NULL;
2048 hp->cpout = NULL;
2049 hp->cperr = NULL;
2050 hp->in = 0;
2051 hp->out = 0;
2052 hp->bzstream.avail_in = 0;
2053
2054 /* Bump reference to mainq while we are running */
2055 CQ_OPEN(mainq);
2056
2057 /* Get one page at a time */
2058 while (dumpsys_sread(hp)) {
2059 if (sh.stream_pagenum != hp->cpin->pagenum) {
2060 sh.stream_pagenum = hp->cpin->pagenum;
2061 sh.stream_npages = btop(hp->cpin->used);
2062 dumpsys_bzrun(hp, &sh, sizeof (sh), BZ_RUN);
2063 }
2064 dumpsys_bzrun(hp, hp->page, PAGESIZE, BZ_RUN);
2065 }
2066
2067 /* Done with input, flush any partial buffer */
2068 if (sh.stream_pagenum != (pgcnt_t)-1) {
2069 dumpsys_bzrun(hp, NULL, 0, BZ_FINISH);
2070 dumpsys_errmsg(hp, NULL);
2071 }
2072
2073 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2074
2075 /* Decrement main queue count, we are done */
2076 CQ_CLOSE(mainq);
2077 }
2078
2079 /*
2080 * Compress with lzjb
2081 * write stream block if full or size==0
2082 * if csize==0 write stream header, else write <csize, data>
2083 * size==0 is a call to flush a buffer
2084 * hp->cpout is the buffer we are flushing or filling
2085 * hp->out is the next index to fill data
2086 * osize is either csize+data, or the size of a stream header
2087 */
2088 static void
2089 dumpsys_lzjbrun(helper_t *hp, size_t csize, void *buf, size_t size)
2090 {
2091 dumpsync_t *ds = hp->ds;
2092 const int CSIZE = sizeof (dumpcsize_t);
2093 dumpcsize_t cs;
2094 size_t osize = csize > 0 ? CSIZE + size : size;
2095
2096 /* If flush, and there is no buffer, just return */
2097 if (size == 0 && hp->cpout == NULL)
2098 return;
2099
2100 /* If flush, or cpout is full, write it out */
2101 if (size == 0 ||
2102 (hp->cpout != NULL && hp->out + osize > hp->cpout->size)) {
2103
2104 /* Set tag+size word at the front of the stream block. */
2105 cs = DUMP_SET_TAG(hp->out - CSIZE, hp->tag);
2106 (void) memcpy(hp->cpout->buf, &cs, CSIZE);
2107
2108 /* Write block to dump file. */
2109 dumpsys_swrite(hp, hp->cpout, hp->out);
2110
2111 /* Clear pointer to indicate we need a new buffer */
2112 hp->cpout = NULL;
2113
2114 /* flushing, we are done */
2115 if (size == 0)
2116 return;
2117 }
2118
2119 /* Get an output buffer if we don't have one. */
2120 if (hp->cpout == NULL) {
2121 HRSTART(hp->perpage, outwait);
2122 hp->cpout = CQ_GET(freebufq);
2123 HRSTOP(hp->perpage, outwait);
2124 hp->out = CSIZE;
2125 }
2126
2127 /* Store csize word. This is the size of compressed data. */
2128 if (csize > 0) {
2129 cs = DUMP_SET_TAG(csize, 0);
2130 (void) memcpy(hp->cpout->buf + hp->out, &cs, CSIZE);
2131 hp->out += CSIZE;
2132 }
2133
2134 /* Store the data. */
2135 (void) memcpy(hp->cpout->buf + hp->out, buf, size);
2136 hp->out += size;
2137 }
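
/*
 * Resulting stream block layout (illustrative):
 *
 *	+--------------------+ <- hp->cpout->buf
 *	| dumpcsize_t        |  tag + total block size, set at write time
 *	+--------------------+ <- hp->out starts at CSIZE
 *	| stream header      |  csize == 0 case: raw copy of the data
 *	+--------------------+
 *	| dumpcsize_t csize  |  csize > 0 case: compressed page size
 *	| lzjb data          |
 *	+--------------------+
 *	| ... more pages ... |
 *	+--------------------+ <- flushed when full or on size == 0
 */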
2138
2139 static void
2140 dumpsys_lzjbcompress(helper_t *hp)
2141 {
2142 dumpsync_t *ds = hp->ds;
2143 size_t csize;
2144 dumpstreamhdr_t sh;
2145
2146 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2147 sh.stream_pagenum = (pgcnt_t)-1;
2148 sh.stream_npages = 0;
2149 hp->cpin = NULL;
2150 hp->cpout = NULL;
2151 hp->cperr = NULL;
2152 hp->in = 0;
2153 hp->out = 0;
2154
2155 /* Bump reference to mainq while we are running */
2156 CQ_OPEN(mainq);
2157
2158 /* Get one page at a time */
2159 while (dumpsys_sread(hp)) {
2160
2161 /* Create a stream header for each new input map */
2162 if (sh.stream_pagenum != hp->cpin->pagenum) {
2163 sh.stream_pagenum = hp->cpin->pagenum;
2164 sh.stream_npages = btop(hp->cpin->used);
2165 dumpsys_lzjbrun(hp, 0, &sh, sizeof (sh));
2166 }
2167
2168 /* Compress one page */
2169 HRSTART(hp->perpage, compress);
2170 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2171 HRSTOP(hp->perpage, compress);
2172
2173 /* Add csize+data to output block */
2174 ASSERT(csize > 0 && csize <= PAGESIZE);
2175 dumpsys_lzjbrun(hp, csize, hp->lzbuf, csize);
2176 }
2177
2178 /* Done with input, flush any partial buffer */
2179 if (sh.stream_pagenum != (pgcnt_t)-1) {
2180 dumpsys_lzjbrun(hp, 0, NULL, 0);
2181 dumpsys_errmsg(hp, NULL);
2182 }
2183
2184 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2185
2186 /* Decrement main queue count, we are done */
2187 CQ_CLOSE(mainq);
2188 }
2189
2190 /*
2191 * Dump helper called from panic_idle() to compress pages. CPUs in
2192 * this path must not call most kernel services.
2193 *
2194 * During panic, all but one of the CPUs is idle. These CPUs are used
2195 * as helpers working in parallel to copy and compress memory
2196 * pages. During a panic, however, these processors cannot call any
2197 * kernel services. This is because mutexes become no-ops during
2198 * panic, and cross-call interrupts are inhibited. Therefore, during
2199 * panic dump the helper CPUs communicate with the panic CPU using
2200 * memory variables. All memory mapping and I/O is performed by the
2201 * panic CPU.
2202 *
2203 * At dump configuration time, helper_lock is set and helpers_wanted
2204 * is 0. dumpsys() decides whether to set helpers_wanted before
2205 * clearing helper_lock.
2206 *
2207 * At panic time, idle CPUs spin-wait on helper_lock, then alternately
2208 * take the lock and become a helper, or return.
2209 */
2210 void
2211 dumpsys_helper()
2212 {
2213 dumpsys_spinlock(&dumpcfg.helper_lock);
2214 if (dumpcfg.helpers_wanted) {
2215 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2216
2217 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2218 if (hp->helper == FREEHELPER) {
2219 hp->helper = CPU->cpu_id;
2220 BT_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2221
2222 dumpsys_spinunlock(&dumpcfg.helper_lock);
2223
2224 if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2225 dumpsys_lzjbcompress(hp);
2226 else
2227 dumpsys_bz2compress(hp);
2228
2229 hp->helper = DONEHELPER;
2230 return;
2231 }
2232 }
2233
2234 /* No more helpers are needed. */
2235 dumpcfg.helpers_wanted = 0;
2236
2237 }
2238 dumpsys_spinunlock(&dumpcfg.helper_lock);
2239 }
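
/*
 * Helper slot state machine (summary of the protocol above):
 *
 *	FREEHELPER --(claimed under helper_lock)--> CPU id
 *	CPU id --(compression loop returns)--> DONEHELPER
 *
 * Slots with no pre-allocated page buffer are parked at DONEHELPER
 * by dumpsys() and are never claimed here.
 */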
2240
2241 /*
2242 * No-wait helper callable in spin loops.
2243 *
2244 * Do not wait for helper_lock. Just check helpers_wanted. The caller
2245 * may decide to continue. This is the "c)ontinue, s)ync, r)eset? s"
2246 * case.
2247 */
2248 void
2249 dumpsys_helper_nw()
2250 {
2251 if (dumpcfg.helpers_wanted)
2252 dumpsys_helper();
2253 }
2254
2255 /*
2256 * Dump helper for live dumps.
2257 * These run as a system task.
2258 */
2259 static void
2260 dumpsys_live_helper(void *arg)
2261 {
2262 helper_t *hp = arg;
2263
2264 BT_ATOMIC_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2265 if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2266 dumpsys_lzjbcompress(hp);
2267 else
2268 dumpsys_bz2compress(hp);
2269 }
2270
2271 /*
2272 * Compress one page with lzjb (single threaded case)
2273 */
2274 static void
2275 dumpsys_lzjb_page(helper_t *hp, cbuf_t *cp)
2276 {
2277 dumpsync_t *ds = hp->ds;
2278 uint32_t csize;
2279
2280 hp->helper = MAINHELPER;
2281 hp->in = 0;
2282 hp->used = 0;
2283 hp->cpin = cp;
2284 while (hp->used < cp->used) {
2285 HRSTART(hp->perpage, copy);
2286 hp->in = dumpsys_copy_page(hp, hp->in);
2287 hp->used += PAGESIZE;
2288 HRSTOP(hp->perpage, copy);
2289
2290 HRSTART(hp->perpage, compress);
2291 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2292 HRSTOP(hp->perpage, compress);
2293
2294 HRSTART(hp->perpage, write);
2295 dumpvp_write(&csize, sizeof (csize));
2296 dumpvp_write(hp->lzbuf, csize);
2297 HRSTOP(hp->perpage, write);
2298 }
2299 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
2300 hp->cpin = NULL;
2301 }
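
/*
 * Note: in this serial path each page is written directly to the dump
 * device as a <uint32_t csize><csize bytes of lzjb data> pair; no
 * stream blocks or tags are emitted. This is the "non-streams" format
 * referred to in dumpsys_main_task().
 */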
2302
2303 /*
2304 * Main task to dump pages. This is called on the dump CPU.
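*
* Buffer flow (summary of the state switch below): map buffers cycle
* mainq/CBUF_FREEMAP -> helperq/CBUF_INREADY -> mainq/CBUF_USEDMAP ->
* back to CBUF_FREEMAP, while output buffers cycle freebufq/CBUF_FREEBUF
* -> mainq/CBUF_WRITE -> writerq -> back to freebufq (CBUF_ERRMSG
* buffers are printed here and then freed).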
2305 */
2306 static void
2307 dumpsys_main_task(void *arg)
2308 {
2309 dumpsync_t *ds = arg;
2310 pgcnt_t pagenum = 0, bitnum = 0, hibitnum;
2311 dumpmlw_t mlw;
2312 cbuf_t *cp;
2313 pgcnt_t baseoff, pfnoff;
2314 pfn_t base, pfn;
2315 boolean_t dumpserial;
2316 int i;
2317
2318 /*
2319 * Fall back to serial mode if there are no helpers.
2320 * dump_plat_mincpu can be set to 0 at any time.
2321 * dumpcfg.helpermap must contain at least one member.
2322 *
2323 * It is possible that the helpers haven't registered
2324 * in helpermap yet; wait up to DUMP_HELPER_MAX_WAIT for
2325 * at least one helper to register.
2326 */
2327 dumpserial = B_TRUE;
2328 if (dump_plat_mincpu != 0 && dumpcfg.clevel != 0) {
2329 hrtime_t hrtmax = MSEC2NSEC(DUMP_HELPER_MAX_WAIT);
2330 hrtime_t hrtstart = gethrtime();
2331
2332 for (;;) {
2333 for (i = 0; i < BT_BITOUL(NCPU); ++i) {
2334 if (dumpcfg.helpermap[i] != 0) {
2335 dumpserial = B_FALSE;
2336 break;
2337 }
2338 }
2339
2340 if ((!dumpserial) ||
2341 ((gethrtime() - hrtstart) >= hrtmax)) {
2342 break;
2343 }
2344
2345 SMT_PAUSE();
2346 }
2347
2348 if (dumpserial) {
2349 dumpcfg.clevel = 0;
2350 if (dumpcfg.helper[0].lzbuf == NULL) {
2351 dumpcfg.helper[0].lzbuf =
2352 dumpcfg.helper[1].page;
2353 }
2354 }
2355 }
2356
2357 dump_init_memlist_walker(&mlw);
2358
2359 for (;;) {
2360 int sec = (gethrtime() - ds->start) / NANOSEC;
2361
2362 /*
2363 * Render a simple progress display on the system console to
2364 * make clear to the operator that the system has not hung.
2365 * Emit an update when dump progress has advanced by one
2366 * percent, or when no update has been drawn in the last
2367 * second.
2368 */
2369 if (ds->percent > ds->percent_done || sec > ds->sec_done) {
2370 ds->sec_done = sec;
2371 ds->percent_done = ds->percent;
2372 uprintf("^\rdumping: %2d:%02d %3d%% done",
2373 sec / 60, sec % 60, ds->percent);
2374 ds->neednl = 1;
2375 }
2376
2377 while (CQ_IS_EMPTY(mainq) && !CQ_IS_EMPTY(writerq)) {
2378
2379 /* the writerq never blocks */
2380 cp = CQ_GET(writerq);
2381 if (cp == NULL)
2382 break;
2383
2384 dump_timeleft = dump_timeout;
2385
2386 HRSTART(ds->perpage, write);
2387 dumpvp_write(cp->buf, cp->used);
2388 HRSTOP(ds->perpage, write);
2389
2390 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2391 }
2392
2393 /*
2394 * Wait here for some buffers to process. Returns NULL
2395 * when all helpers have terminated and all buffers
2396 * have been processed.
2397 */
2398 cp = CQ_GET(mainq);
2399
2400 if (cp == NULL) {
2401
2402 /* Drain the write queue. */
2403 if (!CQ_IS_EMPTY(writerq))
2404 continue;
2405
2406 /* Main task exits here. */
2407 break;
2408 }
2409
2410 dump_timeleft = dump_timeout;
2411
2412 switch (cp->state) {
2413
2414 case CBUF_FREEMAP:
2415
2416 /*
2417 * Note that we drop CBUF_FREEMAP buffers on
2418 * the floor (they will not be on any cqueue)
2419 * when we no longer need them.
2420 */
2421 if (bitnum >= dumpcfg.bitmapsize)
2422 break;
2423
2424 if (dump_ioerr) {
2425 bitnum = dumpcfg.bitmapsize;
2426 CQ_CLOSE(helperq);
2427 break;
2428 }
2429
2430 HRSTART(ds->perpage, bitmap);
2431 for (; bitnum < dumpcfg.bitmapsize; bitnum++)
2432 if (BT_TEST(dumpcfg.bitmap, bitnum))
2433 break;
2434 HRSTOP(ds->perpage, bitmap);
2435 dump_timeleft = dump_timeout;
2436
2437 if (bitnum >= dumpcfg.bitmapsize) {
2438 CQ_CLOSE(helperq);
2439 break;
2440 }
2441
2442 /*
2443 * Try to map CBUF_MAPSIZE ranges. Can't
2444 * assume that memory segment size is a
2445 * multiple of CBUF_MAPSIZE. Can't assume that
2446 * the segment starts on a CBUF_MAPSIZE
2447 * boundary.
2448 */
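/*
 * Illustrative example (assuming CBUF_MAPNP == 0x400, i.e. a 4MB
 * window of 4K pages): for pfn 0x12345, base =
 * P2ALIGN(0x12345, 0x400) = 0x12000 and pfnoff = 0x345. If the
 * segment begins above that alignment, base is raised to mlw.mpaddr
 * and baseoff records the phase of the segment start within the
 * window.
 */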
2449 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2450 ASSERT(pfn != PFN_INVALID);
2451 ASSERT(bitnum + mlw.mpleft <= dumpcfg.bitmapsize);
2452
2453 base = P2ALIGN(pfn, CBUF_MAPNP);
2454 if (base < mlw.mpaddr) {
2455 base = mlw.mpaddr;
2456 baseoff = P2PHASE(base, CBUF_MAPNP);
2457 } else {
2458 baseoff = 0;
2459 }
2460
2461 pfnoff = pfn - base;
2462 if (pfnoff + mlw.mpleft < CBUF_MAPNP) {
2463 hibitnum = bitnum + mlw.mpleft;
2464 cp->size = ptob(pfnoff + mlw.mpleft);
2465 } else {
2466 hibitnum = bitnum - pfnoff + CBUF_MAPNP -
2467 baseoff;
2468 cp->size = CBUF_MAPSIZE - ptob(baseoff);
2469 }
2470
2471 cp->pfn = pfn;
2472 cp->bitnum = bitnum++;
2473 cp->pagenum = pagenum++;
2474 cp->off = ptob(pfnoff);
2475
2476 for (; bitnum < hibitnum; bitnum++)
2477 if (BT_TEST(dumpcfg.bitmap, bitnum))
2478 pagenum++;
2479
2480 dump_timeleft = dump_timeout;
2481 cp->used = ptob(pagenum - cp->pagenum);
2482
2483 HRSTART(ds->perpage, map);
2484 hat_devload(kas.a_hat, cp->buf, cp->size, base,
2485 PROT_READ, HAT_LOAD_NOCONSIST);
2486 HRSTOP(ds->perpage, map);
2487
2488 ds->pages_mapped += btop(cp->size);
2489 ds->pages_used += pagenum - cp->pagenum;
2490
2491 CQ_OPEN(mainq);
2492
2493 /*
2494 * If there are no helpers the main task does
2495 * non-streams lzjb compress.
2496 */
2497 if (dumpserial) {
2498 dumpsys_lzjb_page(dumpcfg.helper, cp);
2499 } else {
2500 /* pass mapped pages to a helper */
2501 CQ_PUT(helperq, cp, CBUF_INREADY);
2502 }
2503
2504 /* the last page was done */
2505 if (bitnum >= dumpcfg.bitmapsize)
2506 CQ_CLOSE(helperq);
2507
2508 break;
2509
2510 case CBUF_USEDMAP:
2511
2512 ds->npages += btop(cp->used);
2513
2514 HRSTART(ds->perpage, unmap);
2515 hat_unload(kas.a_hat, cp->buf, cp->size, HAT_UNLOAD);
2516 HRSTOP(ds->perpage, unmap);
2517
2518 if (bitnum < dumpcfg.bitmapsize)
2519 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2520 CQ_CLOSE(mainq);
2521
2522 ASSERT(ds->npages <= dumphdr->dump_npages);
2523 ds->percent = ds->npages * 100LL / dumphdr->dump_npages;
2524 break;
2525
2526 case CBUF_WRITE:
2527
2528 CQ_PUT(writerq, cp, CBUF_WRITE);
2529 break;
2530
2531 case CBUF_ERRMSG:
2532
2533 if (cp->used > 0) {
2534 cp->buf[cp->size - 2] = '\n';
2535 cp->buf[cp->size - 1] = '\0';
2536 if (ds->neednl) {
2537 uprintf("\n%s", cp->buf);
2538 ds->neednl = 0;
2539 } else {
2540 uprintf("%s", cp->buf);
2541 }
2542 /* wait for console output */
2543 drv_usecwait(200000);
2544 dump_timeleft = dump_timeout;
2545 }
2546 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2547 break;
2548
2549 default:
2550 uprintf("dump: unexpected buffer state %d, "
2551 "buffer will be lost\n", cp->state);
2552 break;
2553
2554 } /* end switch */
2555 }
2556 }
2557
2558 #ifdef COLLECT_METRICS
2559 size_t
2560 dumpsys_metrics(dumpsync_t *ds, char *buf, size_t size)
2561 {
2562 dumpcfg_t *cfg = &dumpcfg;
2563 int myid = CPU->cpu_seqid;
2564 int i, compress_ratio;
2565 int sec, iorate;
2566 helper_t *hp, *hpend = &cfg->helper[cfg->nhelper];
2567 char *e = buf + size;
2568 char *p = buf;
2569
2570 sec = ds->elapsed / (1000 * 1000 * 1000ULL);
2571 if (sec < 1)
2572 sec = 1;
2573
2574 if (ds->iotime < 1)
2575 ds->iotime = 1;
2576 iorate = (ds->nwrite * 100000ULL) / ds->iotime;
2577
2578 compress_ratio = 100LL * ds->npages / btopr(ds->nwrite + 1);
2579
2580 #define P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)
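
/*
 * P() appends formatted text at p and advances p by the full formatted
 * length; once p reaches e, the ternary stops calling snprintf, so
 * later lines are silently dropped rather than overrunning buf.
 */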
2581
2582 P("Master cpu_seqid,%d\n", CPU->cpu_seqid);
2583 P("Master cpu_id,%d\n", CPU->cpu_id);
2584 P("dump_flags,0x%x\n", dumphdr->dump_flags);
2585 P("dump_ioerr,%d\n", dump_ioerr);
2586
2587 P("Helpers:\n");
2588 for (i = 0; i < ncpus; i++) {
2589 if ((i & 15) == 0)
2590 P(",,%03d,", i);
2591 if (i == myid)
2592 P(" M");
2593 else if (BT_TEST(cfg->helpermap, i))
2594 P("%4d", cpu_seq[i]->cpu_id);
2595 else
2596 P(" *");
2597 if ((i & 15) == 15)
2598 P("\n");
2599 }
2600
2601 P("ncbuf_used,%d\n", cfg->ncbuf_used);
2602 P("ncmap,%d\n", cfg->ncmap);
2603
2604 P("Found %ldM ranges,%ld\n", (CBUF_MAPSIZE / DUMP_1MB), cfg->found4m);
2605 P("Found small pages,%ld\n", cfg->foundsm);
2606
2607 P("Compression level,%d\n", cfg->clevel);
2608 P("Compression type,%s %s", cfg->clevel == 0 ? "serial" : "parallel",
2609 cfg->clevel >= DUMP_CLEVEL_BZIP2 ? "bzip2" : "lzjb");
2610 if (cfg->clevel >= DUMP_CLEVEL_BZIP2)
2611 P(" (level %d)\n", dump_bzip2_level);
2612 else
2613 P("\n");
2614 P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio %
2615 100);
2616 P("nhelper_used,%d\n", cfg->nhelper_used);
2617
2618 P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100);
2619 P("..total bytes,%lld\n", (u_longlong_t)ds->nwrite);
2620 P("..total nsec,%lld\n", (u_longlong_t)ds->iotime);
2621 P("dumpbuf.iosize,%ld\n", dumpbuf.iosize);
2622 P("dumpbuf.size,%ld\n", dumpbuf.size);
2623
2624 P("Dump pages/sec,%llu\n", (u_longlong_t)ds->npages / sec);
2625 P("Dump pages,%llu\n", (u_longlong_t)ds->npages);
2626 P("Dump time,%d\n", sec);
2627
2628 if (ds->pages_mapped > 0)
2629 P("per-cent map utilization,%d\n", (int)((100 * ds->pages_used)
2630 / ds->pages_mapped));
2631
2632 P("\nPer-page metrics:\n");
2633 if (ds->npages > 0) {
2634 for (hp = cfg->helper; hp != hpend; hp++) {
2635 #define PERPAGE(x) ds->perpage.x += hp->perpage.x;
2636 PERPAGES;
2637 #undef PERPAGE
2638 }
2639 #define PERPAGE(x) \
2640 P("%s nsec/page,%d\n", #x, (int)(ds->perpage.x / ds->npages));
2641 PERPAGES;
2642 #undef PERPAGE
2643 P("freebufq.empty,%d\n", (int)(ds->freebufq.empty /
2644 ds->npages));
2645 P("helperq.empty,%d\n", (int)(ds->helperq.empty /
2646 ds->npages));
2647 P("writerq.empty,%d\n", (int)(ds->writerq.empty /
2648 ds->npages));
2649 P("mainq.empty,%d\n", (int)(ds->mainq.empty / ds->npages));
2650
2651 P("I/O wait nsec/page,%llu\n", (u_longlong_t)(ds->iowait /
2652 ds->npages));
2653 }
2654 #undef P
2655 if (p < e)
2656 bzero(p, e - p);
2657 return (p - buf);
2658 }
2659 #endif /* COLLECT_METRICS */
2660
2661 /*
2662 * Dump the system.
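*
* On-device layout produced below (as written by this function):
*
*	dump_start			initial dumphdr_t
*	dump_ksyms			kernel symbol table (uncompressed)
*	dump_map			virtual-to-physical translation map
*	dump_pfn			pfn table for dumped pages
*	dump_data			compressed page data (stream blocks)
*	dumpvp_size - DUMP_OFFSET	terminal dumphdr_t, dumpdatahdr_t
*					and metrics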
2663 */
2664 void
2665 dumpsys(void)
2666 {
2667 dumpsync_t *ds = &dumpsync;
2668 taskq_t *livetaskq = NULL;
2669 pfn_t pfn;
2670 pgcnt_t bitnum;
2671 proc_t *p;
2672 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2673 cbuf_t *cp;
2674 pid_t npids, pidx;
2675 char *content;
2676 char *buf;
2677 size_t size;
2678 int save_dump_clevel;
2679 dumpmlw_t mlw;
2680 dumpcsize_t datatag;
2681 dumpdatahdr_t datahdr;
2682
2683 if (dumpvp == NULL || dumphdr == NULL) {
2684 uprintf("skipping system dump - no dump device configured\n");
2685 if (panicstr) {
2686 dumpcfg.helpers_wanted = 0;
2687 dumpsys_spinunlock(&dumpcfg.helper_lock);
2688 }
2689 return;
2690 }
2691 dumpbuf.cur = dumpbuf.start;
2692
2693 /* clear the sync variables */
2694 ASSERT(dumpcfg.nhelper > 0);
2695 bzero(ds, sizeof (*ds));
2696 ds->dumpcpu = CPU->cpu_id;
2697
2698 /*
2699 * Calculate the starting block for dump. If we're dumping on a
2700 * swap device, start 1/5 of the way in; otherwise, start at the
2701 * beginning. And never use the first page -- it may be a disk label.
2702 */
2703 if (dumpvp->v_flag & VISSWAP)
2704 dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET);
2705 else
2706 dumphdr->dump_start = DUMP_OFFSET;
2707
2708 dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED;
2709 dumphdr->dump_crashtime = gethrestime_sec();
2710 dumphdr->dump_npages = 0;
2711 dumphdr->dump_nvtop = 0;
2712 bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize));
2713 dump_timeleft = dump_timeout;
2714
2715 if (panicstr) {
2716 dumphdr->dump_flags &= ~DF_LIVE;
2717 (void) VOP_DUMPCTL(dumpvp, DUMP_FREE, NULL, NULL);
2718 (void) VOP_DUMPCTL(dumpvp, DUMP_ALLOC, NULL, NULL);
2719 (void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE,
2720 panicstr, panicargs);
2721
2722 }
2723
2724 if (dump_conflags & DUMP_ALL)
2725 content = "all";
2726 else if (dump_conflags & DUMP_CURPROC)
2727 content = "kernel + curproc";
2728 else
2729 content = "kernel";
2730 uprintf("dumping to %s, offset %lld, content: %s\n", dumppath,
2731 dumphdr->dump_start, content);
2732
2733 /* Make sure nodename is current */
2734 bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN);
2735
2736 /*
2737 * If this is a live dump, try to open a VCHR vnode for better
2738 * performance. We must take care to flush the buffer cache
2739 * first.
2740 */
2741 if (!panicstr) {
2742 vnode_t *cdev_vp, *cmn_cdev_vp;
2743
2744 ASSERT(dumpbuf.cdev_vp == NULL);
2745 cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR);
2746 if (cdev_vp != NULL) {
2747 cmn_cdev_vp = common_specvp(cdev_vp);
2748 if (VOP_OPEN(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL)
2749 == 0) {
2750 if (vn_has_cached_data(dumpvp))
2751 (void) pvn_vplist_dirty(dumpvp, 0, NULL,
2752 B_INVAL | B_TRUNC, kcred);
2753 dumpbuf.cdev_vp = cmn_cdev_vp;
2754 } else {
2755 VN_RELE(cdev_vp);
2756 }
2757 }
2758 }
2759
2760 /*
2761 * Store a hires timestamp so we can look it up during debugging.
2762 */
2763 lbolt_debug_entry();
2764
2765 /*
2766 * Leave room for the message and ereport save areas and terminal dump
2767 * header.
2768 */
2769 dumpbuf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET -
2770 DUMP_ERPTSIZE;
2771
2772 /*
2773 * Write out the symbol table. It's no longer compressed,
2774 * so its 'size' and 'csize' are equal.
2775 */
2776 dumpbuf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE;
2777 dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize =
2778 ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX);
2779
2780 /*
2781 * Write out the translation map.
2782 */
2783 dumphdr->dump_map = dumpvp_flush();
2784 dump_as(&kas);
2785 dumphdr->dump_nvtop += dump_plat_addr();
2786
2787 /*
2788 * call into hat, which may have unmapped pages that also need to
2789 * be in the dump
2790 */
2791 hat_dump();
2792
2793 if (dump_conflags & DUMP_ALL) {
2794 mutex_enter(&pidlock);
2795
2796 for (npids = 0, p = practive; p != NULL; p = p->p_next)
2797 dumpcfg.pids[npids++] = p->p_pid;
2798
2799 mutex_exit(&pidlock);
2800
2801 for (pidx = 0; pidx < npids; pidx++)
2802 (void) dump_process(dumpcfg.pids[pidx]);
2803
2804 dump_init_memlist_walker(&mlw);
2805 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2806 dump_timeleft = dump_timeout;
2807 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2808 /*
2809 * Some hypervisors do not have all pages available to
2810 * be accessed by the guest OS. Check for page
2811 * accessibility.
2812 */
2813 if (plat_hold_page(pfn, PLAT_HOLD_NO_LOCK, NULL) !=
2814 PLAT_HOLD_OK)
2815 continue;
2816 BT_SET(dumpcfg.bitmap, bitnum);
2817 }
2818 dumphdr->dump_npages = dumpcfg.bitmapsize;
2819 dumphdr->dump_flags |= DF_ALL;
2820
2821 } else if (dump_conflags & DUMP_CURPROC) {
2822 /*
2823 * Determine which pid is to be dumped. If we're panicking, we
2824 * dump the process associated with panic_thread (if any). If
2825 * this is a live dump, we dump the process associated with
2826 * curthread.
2827 */
2828 npids = 0;
2829 if (panicstr) {
2830 if (panic_thread != NULL &&
2831 panic_thread->t_procp != NULL &&
2832 panic_thread->t_procp != &p0) {
2833 dumpcfg.pids[npids++] =
2834 panic_thread->t_procp->p_pid;
2835 }
2836 } else {
2837 dumpcfg.pids[npids++] = curthread->t_procp->p_pid;
2838 }
2839
2840 if (npids && dump_process(dumpcfg.pids[0]) == 0)
2841 dumphdr->dump_flags |= DF_CURPROC;
2842 else
2843 dumphdr->dump_flags |= DF_KERNEL;
2844
2845 } else {
2846 dumphdr->dump_flags |= DF_KERNEL;
2847 }
2848
2849 dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1;
2850
2851 /*
2852 * Write out the pfn table.
2853 */
2854 dumphdr->dump_pfn = dumpvp_flush();
2855 dump_init_memlist_walker(&mlw);
2856 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2857 dump_timeleft = dump_timeout;
2858 if (!BT_TEST(dumpcfg.bitmap, bitnum))
2859 continue;
2860 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2861 ASSERT(pfn != PFN_INVALID);
2862 dumpvp_write(&pfn, sizeof (pfn_t));
2863 }
2864 dump_plat_pfn();
2865
2866 /*
2867 * Write out all the pages.
2868 * Map pages, copy them handling UEs, compress, and write them out.
2869 * Cooperate with any helpers running on CPUs in panic_idle().
2870 */
2871 dumphdr->dump_data = dumpvp_flush();
2872
2873 bzero(dumpcfg.helpermap, BT_SIZEOFMAP(NCPU));
2874 ds->live = dumpcfg.clevel > 0 &&
2875 (dumphdr->dump_flags & DF_LIVE) != 0;
2876
2877 save_dump_clevel = dumpcfg.clevel;
2878 if (panicstr)
2879 dumpsys_get_maxmem();
2880 else if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2881 dumpcfg.clevel = DUMP_CLEVEL_LZJB;
2882
2883 dumpcfg.nhelper_used = 0;
2884 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2885 if (hp->page == NULL) {
2886 hp->helper = DONEHELPER;
2887 continue;
2888 }
2889 ++dumpcfg.nhelper_used;
2890 hp->helper = FREEHELPER;
2891 hp->taskqid = NULL;
2892 hp->ds = ds;
2893 bzero(&hp->perpage, sizeof (hp->perpage));
2894 if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2895 (void) BZ2_bzCompressReset(&hp->bzstream);
2896 }
2897
2898 CQ_OPEN(freebufq);
2899 CQ_OPEN(helperq);
2900
2901 dumpcfg.ncbuf_used = 0;
2902 for (cp = dumpcfg.cbuf; cp != &dumpcfg.cbuf[dumpcfg.ncbuf]; cp++) {
2903 if (cp->buf != NULL) {
2904 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2905 ++dumpcfg.ncbuf_used;
2906 }
2907 }
2908
2909 for (cp = dumpcfg.cmap; cp != &dumpcfg.cmap[dumpcfg.ncmap]; cp++)
2910 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2911
2912 ds->start = gethrtime();
2913 ds->iowaitts = ds->start;
2914
2915 /* start helpers */
2916 if (ds->live) {
2917 int n = dumpcfg.nhelper_used;
2918 int pri = MINCLSYSPRI - 25;
2919
2920 livetaskq = taskq_create("LiveDump", n, pri, n, n,
2921 TASKQ_PREPOPULATE);
2922 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2923 if (hp->page == NULL)
2924 continue;
2925 hp->helper = hp - dumpcfg.helper;
2926 hp->taskqid = taskq_dispatch(livetaskq,
2927 dumpsys_live_helper, (void *)hp, TQ_NOSLEEP);
2928 }
2929
2930 } else {
2931 if (panicstr)
2932 kmem_dump_begin();
2933 dumpcfg.helpers_wanted = dumpcfg.clevel > 0;
2934 dumpsys_spinunlock(&dumpcfg.helper_lock);
2935 }
2936
2937 /* run main task */
2938 dumpsys_main_task(ds);
2939
2940 ds->elapsed = gethrtime() - ds->start;
2941 if (ds->elapsed < 1)
2942 ds->elapsed = 1;
2943
2944 if (livetaskq != NULL)
2945 taskq_destroy(livetaskq);
2946
2947 if (ds->neednl) {
2948 uprintf("\n");
2949 ds->neednl = 0;
2950 }
2951
2952 /* record actual pages dumped */
2953 dumphdr->dump_npages = ds->npages;
2954
2955 /* platform-specific data */
2956 dumphdr->dump_npages += dump_plat_data(dumpcfg.cbuf[0].buf);
2957
2958 /* note any errors by clearing DF_COMPLETE */
2959 if (dump_ioerr || ds->npages < dumphdr->dump_npages)
2960 dumphdr->dump_flags &= ~DF_COMPLETE;
2961
2962 /* end of stream blocks */
2963 datatag = 0;
2964 dumpvp_write(&datatag, sizeof (datatag));
2965
2966 bzero(&datahdr, sizeof (datahdr));
2967
2968 /* buffer for metrics */
2969 buf = dumpcfg.cbuf[0].buf;
2970 size = MIN(dumpcfg.cbuf[0].size, DUMP_OFFSET - sizeof (dumphdr_t) -
2971 sizeof (dumpdatahdr_t));
2972
2973 /* finish the kmem intercepts, collect kmem verbose info */
2974 if (panicstr) {
2975 datahdr.dump_metrics = kmem_dump_finish(buf, size);
2976 buf += datahdr.dump_metrics;
2977 size -= datahdr.dump_metrics;
2978 }
2979
2980 /* record in the header whether this is a fault-management panic */
2981 if (panicstr)
2982 dumphdr->dump_fm_panic = is_fm_panic();
2983
2984 /* compression info in data header */
2985 datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC;
2986 datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION;
2987 datahdr.dump_maxcsize = CBUF_SIZE;
2988 datahdr.dump_maxrange = CBUF_MAPSIZE / PAGESIZE;
2989 datahdr.dump_nstreams = dumpcfg.nhelper_used;
2990 datahdr.dump_clevel = dumpcfg.clevel;
2991 #ifdef COLLECT_METRICS
2992 if (dump_metrics_on)
2993 datahdr.dump_metrics += dumpsys_metrics(ds, buf, size);
2994 #endif
2995 datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data;
2996
2997 /*
2998 * Write out the initial and terminal dump headers.
2999 */
3000 dumpbuf.vp_off = dumphdr->dump_start;
3001 dumpvp_write(dumphdr, sizeof (dumphdr_t));
3002 (void) dumpvp_flush();
3003
3004 dumpbuf.vp_limit = dumpvp_size;
3005 dumpbuf.vp_off = dumpbuf.vp_limit - DUMP_OFFSET;
3006 dumpvp_write(dumphdr, sizeof (dumphdr_t));
3007 dumpvp_write(&datahdr, sizeof (dumpdatahdr_t));
3008 dumpvp_write(dumpcfg.cbuf[0].buf, datahdr.dump_metrics);
3009
3010 (void) dumpvp_flush();
3011
3012 uprintf("\r%3d%% done: %llu pages dumped, ",
3013 ds->percent_done, (u_longlong_t)ds->npages);
3014
3015 if (dump_ioerr == 0) {
3016 uprintf("dump succeeded\n");
3017 } else {
3018 uprintf("dump failed: error %d\n", dump_ioerr);
3019 #ifdef DEBUG
3020 if (panicstr)
3021 debug_enter("dump failed");
3022 #endif
3023 }
3024
3025 /*
3026 * Write out all undelivered messages. This has to be the *last*
3027 * thing we do because the dump process itself emits messages.
3028 */
3029 if (panicstr) {
3030 dump_summary();
3031 dump_ereports();
3032 dump_messages();
3033 }
3034
3035 delay(2 * hz); /* let people see the 'done' message */
3036 dump_timeleft = 0;
3037 dump_ioerr = 0;
3038
3039 /* restore settings after live dump completes */
3040 if (!panicstr) {
3041 dumpcfg.clevel = save_dump_clevel;
3042
3043 /* release any VCHR open of the dump device */
3044 if (dumpbuf.cdev_vp != NULL) {
3045 (void) VOP_CLOSE(dumpbuf.cdev_vp, FREAD | FWRITE, 1, 0,
3046 kcred, NULL);
3047 VN_RELE(dumpbuf.cdev_vp);
3048 dumpbuf.cdev_vp = NULL;
3049 }
3050 }
3051 }
3052
3053 /*
3054 * This function is called whenever the memory size, as represented
3055 * by the phys_install list, changes.
3056 */
3057 void
3058 dump_resize()
3059 {
3060 mutex_enter(&dump_lock);
3061 dumphdr_init();
3062 dumpbuf_resize();
3063 dump_update_clevel();
3064 mutex_exit(&dump_lock);
3065 }
3066
3067 /*
3068 * This function allows for dynamic resizing of a dump area. It assumes that
3069 * the underlying device has updated its size(9P) property.
3070 */
3071 int
3072 dumpvp_resize()
3073 {
3074 int error;
3075 vattr_t vattr;
3076
3077 mutex_enter(&dump_lock);
3078 vattr.va_mask = AT_SIZE;
3079 if ((error = VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL)) != 0) {
3080 mutex_exit(&dump_lock);
3081 return (error);
3082 }
3083
3084 if (vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) {
3085 mutex_exit(&dump_lock);
3086 return (ENOSPC);
3087 }
3088
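/*
 * Round down to a multiple of DUMP_OFFSET (this assumes DUMP_OFFSET
 * is a power of 2, which the mask below requires).
 */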
3089 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
3090 mutex_exit(&dump_lock);
3091 return (0);
3092 }
3093
3094 int
3095 dump_set_uuid(const char *uuidstr)
3096 {
3097 const char *ptr;
3098 int i;
3099
3100 if (uuidstr == NULL || strnlen(uuidstr, 36 + 1) != 36)
3101 return (EINVAL);
3102
3103 /* uuid_parse is not common code so check manually */
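/*
 * Expected form: 36 characters in 8-4-4-4-12 groups of hex digits,
 * e.g. "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", with '-' at offsets
 * 8, 13, 18 and 23 as checked below.
 */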
3104 for (i = 0, ptr = uuidstr; i < 36; i++, ptr++) {
3105 switch (i) {
3106 case 8:
3107 case 13:
3108 case 18:
3109 case 23:
3110 if (*ptr != '-')
3111 return (EINVAL);
3112 break;
3113
3114 default:
3115 if (!isxdigit(*ptr))
3116 return (EINVAL);
3117 break;
3118 }
3119 }
3120
3121 if (dump_osimage_uuid[0] != '\0')
3122 return (EALREADY);
3123
3124 (void) strncpy(dump_osimage_uuid, uuidstr, 36 + 1);
3125
3126 cmn_err(CE_CONT, "?This Solaris instance has UUID %s\n",
3127 dump_osimage_uuid);
3128
3129 return (0);
3130 }
3131
3132 const char *
3133 dump_get_uuid(void)
3134 {
3135 return (dump_osimage_uuid[0] != '\0' ? dump_osimage_uuid : "");
3136 }
3137