1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/vm.h>
31 #include <sys/proc.h>
32 #include <sys/file.h>
33 #include <sys/conf.h>
34 #include <sys/kmem.h>
35 #include <sys/mem.h>
36 #include <sys/mman.h>
37 #include <sys/vnode.h>
38 #include <sys/errno.h>
39 #include <sys/memlist.h>
40 #include <sys/dumphdr.h>
41 #include <sys/dumpadm.h>
42 #include <sys/ksyms.h>
43 #include <sys/compress.h>
44 #include <sys/stream.h>
45 #include <sys/strsun.h>
46 #include <sys/cmn_err.h>
47 #include <sys/bitmap.h>
48 #include <sys/modctl.h>
49 #include <sys/utsname.h>
50 #include <sys/systeminfo.h>
51 #include <sys/vmem.h>
52 #include <sys/log.h>
53 #include <sys/var.h>
54 #include <sys/debug.h>
55 #include <sys/sunddi.h>
56 #include <fs/fs_subr.h>
57 #include <sys/fs/snode.h>
58 #include <sys/ontrap.h>
59 #include <sys/panic.h>
60 #include <sys/dkio.h>
61 #include <sys/vtoc.h>
62 #include <sys/errorq.h>
63 #include <sys/fm/util.h>
64 #include <sys/fs/zfs.h>
65 
66 #include <vm/hat.h>
67 #include <vm/as.h>
68 #include <vm/page.h>
69 #include <vm/pvn.h>
70 #include <vm/seg.h>
71 #include <vm/seg_kmem.h>
72 #include <sys/clock_impl.h>
73 
74 #include <bzip2/bzlib.h>
75 
76 /*
77  * Crash dump time is dominated by disk write time.  To reduce this,
78  * the stronger compression method bzip2 is applied to reduce the dump
79  * size and hence reduce I/O time.  However, bzip2 is much more
80  * computationally expensive than the existing lzjb algorithm, so to
81  * avoid increasing compression time, CPUs that are otherwise idle
82  * during panic are employed to parallelize the compression task.
83  * Many helper CPUs are needed to prevent bzip2 from being a
84  * bottleneck, and on systems with too few CPUs, the lzjb algorithm is
85  * parallelized instead. Lastly, I/O and compression are performed by
86  * different CPUs, and are hence overlapped in time, unlike the older
87  * serial code.
88  *
89  * Another important consideration is the speed of the dump
90  * device. Faster disks need fewer CPUs in order to benefit from
91  * parallel lzjb versus parallel bzip2. Therefore, the CPU count
92  * threshold for switching from parallel lzjb to parallel bzip2 is
93  * elevated for faster disks. The dump device speed is inferred from
94  * the setting of dumpbuf.iosize; see dump_update_clevel.
95  */
96 
97 /*
98  * exported vars
99  */
100 kmutex_t	dump_lock;		/* lock for dump configuration */
101 dumphdr_t	*dumphdr;		/* dump header */
102 int		dump_conflags = DUMP_KERNEL; /* dump configuration flags */
103 vnode_t		*dumpvp;		/* dump device vnode pointer */
104 u_offset_t	dumpvp_size;		/* size of dump device, in bytes */
105 char		*dumppath;		/* pathname of dump device */
106 int		dump_timeout = 120;	/* timeout for dumping pages */
107 int		dump_timeleft;		/* portion of dump_timeout remaining */
108 int		dump_ioerr;		/* dump i/o error */
109 int		dump_check_used;	/* enable check for used pages */
110 
111 /*
112  * Tunables for dump compression and parallelism. These can be set via
113  * /etc/system.
114  *
115  * dump_ncpu_low	number of helpers for parallel lzjb
116  *	This is also the minimum configuration.
117  *
118  * dump_bzip2_level	bzip2 compression level: 1-9
119  *	Higher numbers give greater compression, but take more memory
120  *	and time. Memory used per helper is ~(dump_bzip2_level * 1MB).
121  *
122  * dump_plat_mincpu	the cross-over limit for using bzip2 (per platform):
123  *	if dump_plat_mincpu == 0, then always do a single-threaded dump
124  *	if ncpu >= dump_plat_mincpu then try to use bzip2
125  *
126  * dump_metrics_on	if set, metrics are collected in the kernel, passed
127  *	to savecore via the dump file, and recorded by savecore in
128  *	METRICS.txt.
129  */
130 uint_t dump_ncpu_low = 4;	/* minimum config for parallel lzjb */
131 uint_t dump_bzip2_level = 1;	/* bzip2 level (1-9) */
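
/*
 * For example, a minimal sketch of /etc/system entries for these
 * tunables (the values here are illustrative, not recommendations):
 *
 *	set dump_ncpu_low=8
 *	set dump_bzip2_level=2
 *	set dump_metrics_on=1
 */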
132 
133 /* Define multiple buffers per helper to avoid stalling */
134 #define	NCBUF_PER_HELPER	2
135 #define	NCMAP_PER_HELPER	4
136 
137 /* minimum number of helpers configured */
138 #define	MINHELPERS	(dump_ncpu_low)
139 #define	MINCBUFS	(MINHELPERS * NCBUF_PER_HELPER)
140 
141 /*
142  * Define constant parameters.
143  *
144  * CBUF_SIZE		size of an output buffer
145  *
146  * CBUF_MAPSIZE		size of virtual range for mapping pages
147  *
148  * CBUF_MAPNP		size of virtual range in pages
149  *
150  */
151 #define	DUMP_1KB	((size_t)1 << 10)
152 #define	DUMP_1MB	((size_t)1 << 20)
153 #define	CBUF_SIZE	((size_t)1 << 17)
154 #define	CBUF_MAPSHIFT	(22)
155 #define	CBUF_MAPSIZE	((size_t)1 << CBUF_MAPSHIFT)
156 #define	CBUF_MAPNP	((size_t)1 << (CBUF_MAPSHIFT - PAGESHIFT))
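
/*
 * Concretely: CBUF_SIZE is 128K and CBUF_MAPSIZE is 4MB. CBUF_MAPNP
 * depends on the page size: 1024 pages with 4K pages (PAGESHIFT 12),
 * or 512 pages with 8K pages (sparc).
 */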
157 
158 /*
159  * Compression metrics are accumulated as nanosecond subtotals. The
160  * results are normalized by the number of pages dumped. A report is
161  * generated when dumpsys() completes and is saved in the dump image
162  * after the trailing dump header.
163  *
164  * Metrics are always collected. Set the variable dump_metrics_on to
165  * cause metrics to be saved in the crash file, where savecore will
166  * save them in the file METRICS.txt.
167  */
168 #define	PERPAGES \
169 	PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \
170 	PERPAGE(copy) PERPAGE(compress) \
171 	PERPAGE(write) \
172 	PERPAGE(inwait) PERPAGE(outwait)
173 
174 typedef struct perpage {
175 #define	PERPAGE(x) hrtime_t x;
176 	PERPAGES
177 #undef PERPAGE
178 } perpage_t;
179 
180 /*
181  * This macro controls the code generation for collecting dump
182  * performance information. By default, the code is generated, but
183  * automatic saving of the information is disabled. If dump_metrics_on
184  * is set to 1, the timing information is passed to savecore via the
185  * crash file, where it is appended to the file dump-dir/METRICS.txt.
186  */
187 #define	COLLECT_METRICS
188 
189 #ifdef COLLECT_METRICS
190 uint_t dump_metrics_on = 0;	/* set to 1 to enable recording metrics */
191 
192 #define	HRSTART(v, m)		v##ts.m = gethrtime()
193 #define	HRSTOP(v, m)		v.m += gethrtime() - v##ts.m
194 #define	HRBEGIN(v, m, s)	v##ts.m = gethrtime(); v.size += s
195 #define	HREND(v, m)		v.m += gethrtime() - v##ts.m
196 #define	HRNORM(v, m, n)		v.m /= (n)
197 
198 #else
199 #define	HRSTART(v, m)
200 #define	HRSTOP(v, m)
201 #define	HRBEGIN(v, m, s)
202 #define	HREND(v, m)
203 #define	HRNORM(v, m, n)
204 #endif	/* COLLECT_METRICS */
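
/*
 * For instance, HRSTART(ds->perpage, write) expands (when
 * COLLECT_METRICS is defined) to
 *
 *	ds->perpagets.write = gethrtime();
 *
 * and the matching HRSTOP(ds->perpage, write) accumulates the
 * elapsed time into ds->perpage.write.
 */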
205 
206 /*
207  * Buffers for copying and compressing memory pages.
208  *
209  * cbuf_t buffer controllers: used for both input and output.
210  *
211  * The buffer state indicates how it is being used:
212  *
213  * CBUF_FREEMAP: CBUF_MAPSIZE virtual address range is available for
214  * mapping input pages.
215  *
216  * CBUF_INREADY: input pages are mapped and ready for compression by a
217  * helper.
218  *
219  * CBUF_USEDMAP: mapping has been consumed by a helper. Needs unmap.
220  *
221  * CBUF_FREEBUF: CBUF_SIZE output buffer, which is available.
222  *
223  * CBUF_WRITE: CBUF_SIZE block of compressed pages from a helper,
224  * ready to write out.
225  *
226  * CBUF_ERRMSG: CBUF_SIZE block of error messages from a helper
227  * (reports UE errors).
228  */
229 
230 typedef enum cbufstate {
231 	CBUF_FREEMAP,
232 	CBUF_INREADY,
233 	CBUF_USEDMAP,
234 	CBUF_FREEBUF,
235 	CBUF_WRITE,
236 	CBUF_ERRMSG
237 } cbufstate_t;
238 
239 typedef struct cbuf cbuf_t;
240 
241 struct cbuf {
242 	cbuf_t *next;			/* next in list */
243 	cbufstate_t state;		/* processing state */
244 	size_t used;			/* amount used */
245 	size_t size;			/* mem size */
246 	char *buf;			/* kmem or vmem */
247 	pgcnt_t pagenum;		/* index to pfn map */
248 	pgcnt_t bitnum;			/* first set bitnum */
249 	pfn_t pfn;			/* first pfn in mapped range */
250 	int off;			/* byte offset to first pfn */
251 };
252 
253 /*
254  * cqueue_t queues: a uni-directional channel for communication
255  * from the master to helper tasks or vice-versa using put and
256  * get primitives. Both mappings and data buffers are passed via
257  * queues. Producers close a queue when done. The number of
258  * active producers is reference counted so the consumer can
259  * detect end of data. Concurrent access is mediated by atomic
260  * operations for panic dump, or mutex/cv for live dump.
261  *
262  * There are four queues, used as follows:
263  *
264  * Queue		Dataflow		NewState
265  * --------------------------------------------------
266  * mainq		master -> master	FREEMAP
267  * master has initialized or unmapped an input buffer
268  * --------------------------------------------------
269  * helperq		master -> helper	INREADY
270  * master has mapped input for use by helper
271  * --------------------------------------------------
272  * mainq		master <- helper	USEDMAP
273  * helper is done with input
274  * --------------------------------------------------
275  * freebufq		master -> helper	FREEBUF
276  * master has initialized or written an output buffer
277  * --------------------------------------------------
278  * mainq		master <- helper	WRITE
279  * block of compressed pages from a helper
280  * --------------------------------------------------
281  * mainq		master <- helper	ERRMSG
282  * error messages from a helper (memory error case)
283  * --------------------------------------------------
284  * writerq		master <- master	WRITE
285  * non-blocking queue of blocks to write
286  * --------------------------------------------------
287  */
288 typedef struct cqueue {
289 	cbuf_t *volatile first;		/* first in list */
290 	cbuf_t *last;			/* last in list */
291 	hrtime_t ts;			/* timestamp */
292 	hrtime_t empty;			/* total time empty */
293 	kmutex_t mutex;			/* live state lock */
294 	kcondvar_t cv;			/* live wait var */
295 	lock_t spinlock;		/* panic mode spin lock */
296 	volatile uint_t open;		/* producer ref count */
297 } cqueue_t;
298 
299 /*
300  * Convenience macros for using the cqueue functions
301  * Note that the caller must have defined "dumpsync_t *ds"
302  */
303 #define	CQ_IS_EMPTY(q)					\
304 	(ds->q.first == NULL)
305 
306 #define	CQ_OPEN(q)					\
307 	atomic_inc_uint(&ds->q.open)
308 
309 #define	CQ_CLOSE(q)					\
310 	dumpsys_close_cq(&ds->q, ds->live)
311 
312 #define	CQ_PUT(q, cp, st)				\
313 	dumpsys_put_cq(&ds->q, cp, st, ds->live)
314 
315 #define	CQ_GET(q)					\
316 	dumpsys_get_cq(&ds->q, ds->live)
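
/*
 * Typical usage, as in the helper code below: with a local
 * dumpsync_t *ds in scope, a helper obtains a free output buffer
 * and later hands it to the main task for writing:
 *
 *	cbuf_t *cp = CQ_GET(freebufq);
 *	(fill cp->buf, set cp->used)
 *	CQ_PUT(mainq, cp, CBUF_WRITE);
 */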
317 
318 /*
319  * Dynamic state when dumpsys() is running.
320  */
321 typedef struct dumpsync {
322 	pgcnt_t npages;			/* subtotal of pages dumped */
323 	pgcnt_t pages_mapped;		/* subtotal of pages mapped */
324 	pgcnt_t pages_used;		/* subtotal of pages used per map */
325 	size_t nwrite;			/* subtotal of bytes written */
326 	uint_t live;			/* running live dump */
327 	uint_t neednl;			/* will need to print a newline */
328 	uint_t percent;			/* dump progress */
329 	uint_t percent_done;		/* dump progress reported */
330 	cqueue_t freebufq;		/* free kmem bufs for writing */
331 	cqueue_t mainq;			/* input for main task */
332 	cqueue_t helperq;		/* input for helpers */
333 	cqueue_t writerq;		/* input for writer */
334 	hrtime_t start;			/* start time */
335 	hrtime_t elapsed;		/* elapsed time when completed */
336 	hrtime_t iotime;		/* time spent writing nwrite bytes */
337 	hrtime_t iowait;		/* time spent waiting for output */
338 	hrtime_t iowaitts;		/* iowait timestamp */
339 	perpage_t perpage;		/* metrics */
340 	perpage_t perpagets;
341 	int dumpcpu;			/* master cpu */
342 } dumpsync_t;
343 
344 static dumpsync_t dumpsync;		/* synchronization vars */
345 
346 /*
347  * helper_t helpers: contains the context for a stream. CPUs run in
348  * parallel at dump time; each CPU creates a single stream of
349  * compression data.  Stream data is divided into CBUF_SIZE blocks.
350  * The blocks are written in order within a stream. But, blocks from
351  * multiple streams can be interleaved. Each stream is identified by a
352  * unique tag.
353  */
354 typedef struct helper {
355 	int helper;			/* bound helper id */
356 	int tag;			/* compression stream tag */
357 	perpage_t perpage;		/* per page metrics */
358 	perpage_t perpagets;		/* per page metrics (timestamps) */
359 	taskqid_t taskqid;		/* live dump task ptr */
360 	int in, out;			/* buffer offsets */
361 	cbuf_t *cpin, *cpout, *cperr;	/* cbuf objects in process */
362 	dumpsync_t *ds;			/* pointer to sync vars */
363 	size_t used;			/* counts input consumed */
364 	char *page;			/* buffer for page copy */
365 	char *lzbuf;			/* lzjb output */
366 	bz_stream bzstream;		/* bzip2 state */
367 } helper_t;
368 
369 #define	MAINHELPER	(-1)		/* helper is also the main task */
370 #define	FREEHELPER	(-2)		/* unbound helper */
371 #define	DONEHELPER	(-3)		/* helper finished */
372 
373 /*
374  * configuration vars for dumpsys
375  */
376 typedef struct dumpcfg {
377 	int	threshold;	/* ncpu threshold for bzip2 */
378 	int	nhelper;	/* number of helpers */
379 	int	nhelper_used;	/* actual number of helpers used */
380 	int	ncmap;		/* number of VA pages for compression */
381 	int	ncbuf;		/* number of bufs for compression */
382 	int	ncbuf_used;	/* number of bufs in use */
383 	uint_t	clevel;		/* dump compression level */
384 	helper_t *helper;	/* array of helpers */
385 	cbuf_t	*cmap;		/* array of input (map) buffers */
386 	cbuf_t	*cbuf;		/* array of output buffers */
387 	ulong_t	*helpermap;	/* set of dumpsys helper CPU ids */
388 	ulong_t	*bitmap;	/* bitmap for marking pages to dump */
389 	ulong_t	*rbitmap;	/* bitmap for used CBUF_MAPSIZE ranges */
390 	pgcnt_t	bitmapsize;	/* size of bitmap */
391 	pgcnt_t	rbitmapsize;	/* size of bitmap for ranges */
392 	pgcnt_t found4m;	/* number of ranges allocated by dump */
393 	pgcnt_t foundsm;	/* number small pages allocated by dump */
394 	pid_t	*pids;		/* list of process IDs at dump time */
395 	size_t	maxsize;	/* memory size needed at dump time */
396 	size_t	maxvmsize;	/* size of reserved VM */
397 	char	*maxvm;		/* reserved VM for spare pages */
398 	lock_t	helper_lock;	/* protect helper state */
399 	char	helpers_wanted;	/* flag to enable parallelism */
400 } dumpcfg_t;
401 
402 static dumpcfg_t dumpcfg;	/* config vars */
403 
404 /*
405  * The dump I/O buffer.
406  *
407  * There is one I/O buffer used by dumpvp_write and dumpvp_flush. It is
408  * sized according to the optimum device transfer speed.
409  */
410 typedef struct dumpbuf {
411 	vnode_t	*cdev_vp;	/* VCHR open of the dump device */
412 	len_t	vp_limit;	/* maximum write offset */
413 	offset_t vp_off;	/* current dump device offset */
414 	char	*cur;		/* dump write pointer */
415 	char	*start;		/* dump buffer address */
416 	char	*end;		/* dump buffer end */
417 	size_t	size;		/* size of dumpbuf in bytes */
418 	size_t	iosize;		/* best transfer size for device */
419 } dumpbuf_t;
420 
421 dumpbuf_t dumpbuf;		/* I/O buffer */
422 
423 /*
424  * The dump I/O buffer must be at least one page, at most xfer_size
425  * bytes, and should scale with physmem in between.  The transfer size
426  * passed in will either represent a global default (maxphys) or the
427  * best size for the device.  The size of the dumpbuf I/O buffer is
428  * limited by dumpbuf_limit (8MB by default) because the dump
429  * performance saturates beyond a certain size.  The default is to
430  * select 1/4096 of the memory.
431  */
432 static int	dumpbuf_fraction = 12;	/* memory size scale factor */
433 static size_t	dumpbuf_limit = 8 * DUMP_1MB;	/* max I/O buf size */
434 
435 static size_t
436 dumpbuf_iosize(size_t xfer_size)
437 {
438 	size_t iosize = ptob(physmem >> dumpbuf_fraction);
439 
440 	if (iosize < PAGESIZE)
441 		iosize = PAGESIZE;
442 	else if (iosize > xfer_size)
443 		iosize = xfer_size;
444 	if (iosize > dumpbuf_limit)
445 		iosize = dumpbuf_limit;
446 	return (iosize & PAGEMASK);
447 }
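
/*
 * For illustration: with 4K pages and dumpbuf_fraction = 12, a 4GB
 * machine (physmem = 0x100000 pages) yields ptob(0x100000 >> 12) =
 * 1MB, so the buffer is 1MB if the device transfer size allows it;
 * beyond 32GB of memory the result is capped at dumpbuf_limit (8MB).
 */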
448 
449 /*
450  * resize the I/O buffer
451  */
452 static void
453 dumpbuf_resize(void)
454 {
455 	char *old_buf = dumpbuf.start;
456 	size_t old_size = dumpbuf.size;
457 	char *new_buf;
458 	size_t new_size;
459 
460 	ASSERT(MUTEX_HELD(&dump_lock));
461 
462 	new_size = dumpbuf_iosize(MAX(dumpbuf.iosize, maxphys));
463 	if (new_size <= old_size)
464 		return; /* no need to reallocate buffer */
465 
466 	new_buf = kmem_alloc(new_size, KM_SLEEP);
467 	dumpbuf.size = new_size;
468 	dumpbuf.start = new_buf;
469 	dumpbuf.end = new_buf + new_size;
470 	kmem_free(old_buf, old_size);
471 }
472 
473 /*
474  * dump_update_clevel is called when dumpadm configures the dump device.
475  * 	Calculate number of helpers and buffers.
476  * 	Allocate the minimum configuration for now.
477  *
478  * When the dump file is configured we reserve a minimum amount of
479  * memory for use at crash time. But we reserve VA for all the memory
480  * we really want in order to do the fastest dump possible. The VA is
481  * backed by pages not being dumped, according to the bitmap. If
482  * there is insufficient spare memory, however, we fall back to the
483  * minimum.
484  *
485  * Live dump (savecore -L) always uses the minimum config.
486  *
487  * clevel 0 is single threaded lzjb
488  * clevel 1 is parallel lzjb
489  * clevel 2 is parallel bzip2
490  *
491  * The ncpu threshold is selected with dump_plat_mincpu.
492  * On OPL, set_platform_defaults() overrides the sun4u setting.
493  * The actual values are defined via DUMP_PLAT_*_MINCPU macros.
494  *
495  * Architecture		Threshold	Algorithm
496  * sun4u       		<  51		parallel lzjb
497  * sun4u       		>= 51		parallel bzip2(*)
498  * sun4u OPL   		<  8		parallel lzjb
499  * sun4u OPL   		>= 8		parallel bzip2(*)
500  * sun4v       		<  128		parallel lzjb
501  * sun4v       		>= 128		parallel bzip2(*)
502  * x86			< 11		parallel lzjb
503  * x86			>= 11		parallel bzip2(*)
504  * 32-bit      		N/A		single-threaded lzjb
505  *
506  * (*) bzip2 is only chosen if there is sufficient available
507  * memory for buffers at dump time. See dumpsys_get_maxmem().
508  *
509  * Faster dump devices have larger I/O buffers. The threshold value is
510  * increased according to the size of the dump I/O buffer, because
511  * parallel lzjb performs better with faster disks. For buffers >= 1MB
512  * the threshold is 3X; for buffers >= 256K threshold is 2X.
513  *
514  * For parallel dumps, the number of helpers is ncpu-1. The CPU
515  * running panic runs the main task. For single-threaded dumps, the
516  * panic CPU does lzjb compression (it is tagged as MAINHELPER).
517  *
518  * Need multiple buffers per helper so that they do not block waiting
519  * for the main task.
520  *				parallel	single-threaded
521  * Number of output buffers:	nhelper*2		1
522  * Number of mapping buffers:	nhelper*4		1
523  *
524  */
525 static void
526 dump_update_clevel()
527 {
528 	int tag;
529 	size_t bz2size;
530 	helper_t *hp, *hpend;
531 	cbuf_t *cp, *cpend;
532 	dumpcfg_t *old = &dumpcfg;
533 	dumpcfg_t newcfg = *old;
534 	dumpcfg_t *new = &newcfg;
535 
536 	ASSERT(MUTEX_HELD(&dump_lock));
537 
538 	/*
539 	 * Free the previously allocated bufs and VM.
540 	 */
541 	if (old->helper != NULL) {
542 
543 		/* helpers */
544 		hpend = &old->helper[old->nhelper];
545 		for (hp = old->helper; hp != hpend; hp++) {
546 			if (hp->lzbuf != NULL)
547 				kmem_free(hp->lzbuf, PAGESIZE);
548 			if (hp->page != NULL)
549 				kmem_free(hp->page, PAGESIZE);
550 		}
551 		kmem_free(old->helper, old->nhelper * sizeof (helper_t));
552 
553 		/* VM space for mapping pages */
554 		cpend = &old->cmap[old->ncmap];
555 		for (cp = old->cmap; cp != cpend; cp++)
556 			vmem_xfree(heap_arena, cp->buf, CBUF_MAPSIZE);
557 		kmem_free(old->cmap, old->ncmap * sizeof (cbuf_t));
558 
559 		/* output bufs */
560 		cpend = &old->cbuf[old->ncbuf];
561 		for (cp = old->cbuf; cp != cpend; cp++)
562 			if (cp->buf != NULL)
563 				kmem_free(cp->buf, cp->size);
564 		kmem_free(old->cbuf, old->ncbuf * sizeof (cbuf_t));
565 
566 		/* reserved VM for dumpsys_get_maxmem */
567 		if (old->maxvmsize > 0)
568 			vmem_xfree(heap_arena, old->maxvm, old->maxvmsize);
569 	}
570 
571 	/*
572 	 * Allocate memory and VM.
573 	 * One CPU runs dumpsys, the rest are helpers.
574 	 */
575 	new->nhelper = ncpus - 1;
576 	if (new->nhelper < 1)
577 		new->nhelper = 1;
578 
579 	if (new->nhelper > DUMP_MAX_NHELPER)
580 		new->nhelper = DUMP_MAX_NHELPER;
581 
582 	/* increase threshold for faster disks */
583 	new->threshold = dump_plat_mincpu;
584 	if (dumpbuf.iosize >= DUMP_1MB)
585 		new->threshold *= 3;
586 	else if (dumpbuf.iosize >= (256 * DUMP_1KB))
587 		new->threshold *= 2;
588 
589 	/* figure compression level based upon the computed threshold. */
590 	if (dump_plat_mincpu == 0 || new->nhelper < 2) {
591 		new->clevel = 0;
592 		new->nhelper = 1;
593 	} else if ((new->nhelper + 1) >= new->threshold) {
594 		new->clevel = DUMP_CLEVEL_BZIP2;
595 	} else {
596 		new->clevel = DUMP_CLEVEL_LZJB;
597 	}
598 
599 	if (new->clevel == 0) {
600 		new->ncbuf = 1;
601 		new->ncmap = 1;
602 	} else {
603 		new->ncbuf = NCBUF_PER_HELPER * new->nhelper;
604 		new->ncmap = NCMAP_PER_HELPER * new->nhelper;
605 	}
606 
607 	/*
608 	 * Allocate new data structures and buffers for MINHELPERS,
609 	 * and also figure the max desired size.
610 	 */
611 	bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
612 	new->maxsize = 0;
613 	new->maxvmsize = 0;
614 	new->maxvm = NULL;
615 	tag = 1;
616 	new->helper = kmem_zalloc(new->nhelper * sizeof (helper_t), KM_SLEEP);
617 	hpend = &new->helper[new->nhelper];
618 	for (hp = new->helper; hp != hpend; hp++) {
619 		hp->tag = tag++;
620 		if (hp < &new->helper[MINHELPERS]) {
621 			hp->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
622 			hp->page = kmem_alloc(PAGESIZE, KM_SLEEP);
623 		} else if (new->clevel < DUMP_CLEVEL_BZIP2) {
624 			new->maxsize += 2 * PAGESIZE;
625 		} else {
626 			new->maxsize += PAGESIZE;
627 		}
628 		if (new->clevel >= DUMP_CLEVEL_BZIP2)
629 			new->maxsize += bz2size;
630 	}
631 
632 	new->cbuf = kmem_zalloc(new->ncbuf * sizeof (cbuf_t), KM_SLEEP);
633 	cpend = &new->cbuf[new->ncbuf];
634 	for (cp = new->cbuf; cp != cpend; cp++) {
635 		cp->state = CBUF_FREEBUF;
636 		cp->size = CBUF_SIZE;
637 		if (cp < &new->cbuf[MINCBUFS])
638 			cp->buf = kmem_alloc(cp->size, KM_SLEEP);
639 		else
640 			new->maxsize += cp->size;
641 	}
642 
643 	new->cmap = kmem_zalloc(new->ncmap * sizeof (cbuf_t), KM_SLEEP);
644 	cpend = &new->cmap[new->ncmap];
645 	for (cp = new->cmap; cp != cpend; cp++) {
646 		cp->state = CBUF_FREEMAP;
647 		cp->size = CBUF_MAPSIZE;
648 		cp->buf = vmem_xalloc(heap_arena, CBUF_MAPSIZE, CBUF_MAPSIZE,
649 		    0, 0, NULL, NULL, VM_SLEEP);
650 	}
651 
652 	/* reserve VA to be backed with spare pages at crash time */
653 	if (new->maxsize > 0) {
654 		new->maxsize = P2ROUNDUP(new->maxsize, PAGESIZE);
655 		new->maxvmsize = P2ROUNDUP(new->maxsize, CBUF_MAPSIZE);
656 		new->maxvm = vmem_xalloc(heap_arena, new->maxvmsize,
657 		    CBUF_MAPSIZE, 0, 0, NULL, NULL, VM_SLEEP);
658 	}
659 
660 	/* set new config pointers */
661 	*old = *new;
662 }
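
/*
 * Worked example (hypothetical x86 machine with 16 CPUs, taking the
 * threshold of 11 from the table above): nhelper = 15. With a fast
 * device (iosize >= 1MB) the threshold becomes 33, so nhelper + 1 =
 * 16 < 33 selects parallel lzjb with ncbuf = 30 and ncmap = 60. With
 * a slow device the threshold stays 11, and 16 >= 11 selects bzip2.
 */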
663 
664 /*
665  * Define a struct memlist walker to optimize bitnum to pfn
666  * lookup. The walker maintains the state of the list traversal.
667  */
668 typedef struct dumpmlw {
669 	struct memlist	*mp;		/* current memlist */
670 	pgcnt_t		basenum;	/* bitnum base offset */
671 	pgcnt_t		mppages;	/* current memlist size */
672 	pgcnt_t		mpleft;		/* size to end of current memlist */
673 	pfn_t		mpaddr;		/* first pfn in memlist */
674 } dumpmlw_t;
675 
676 /* initialize the walker */
677 static inline void
678 dump_init_memlist_walker(dumpmlw_t *pw)
679 {
680 	pw->mp = phys_install;
681 	pw->basenum = 0;
682 	pw->mppages = pw->mp->size >> PAGESHIFT;
683 	pw->mpleft = pw->mppages;
684 	pw->mpaddr = pw->mp->address >> PAGESHIFT;
685 }
686 
687 /*
688  * Lookup pfn given bitnum. The memlist can be quite long on some
689  * systems (e.g., one per board). To optimize sequential lookups, the
690  * caller initializes and presents a memlist walker.
691  */
692 static pfn_t
693 dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw)
694 {
695 	bitnum -= pw->basenum;
696 	while (pw->mp != NULL) {
697 		if (bitnum < pw->mppages) {
698 			pw->mpleft = pw->mppages - bitnum;
699 			return (pw->mpaddr + bitnum);
700 		}
701 		bitnum -= pw->mppages;
702 		pw->basenum += pw->mppages;
703 		pw->mp = pw->mp->next;
704 		if (pw->mp != NULL) {
705 			pw->mppages = pw->mp->size >> PAGESHIFT;
706 			pw->mpleft = pw->mppages;
707 			pw->mpaddr = pw->mp->address >> PAGESHIFT;
708 		}
709 	}
710 	return (PFN_INVALID);
711 }
712 
713 static pgcnt_t
714 dump_pfn_to_bitnum(pfn_t pfn)
715 {
716 	struct memlist *mp;
717 	pgcnt_t bitnum = 0;
718 
719 	for (mp = phys_install; mp != NULL; mp = mp->next) {
720 		if (pfn >= (mp->address >> PAGESHIFT) &&
721 		    pfn < ((mp->address + mp->size) >> PAGESHIFT))
722 			return (bitnum + pfn - (mp->address >> PAGESHIFT));
723 		bitnum += mp->size >> PAGESHIFT;
724 	}
725 	return ((pgcnt_t)-1);
726 }
727 
728 /*
729  * Set/test bitmap for a CBUF_MAPSIZE range which includes pfn. The
730  * mapping of pfn to range index is imperfect because pfn and bitnum
731  * do not have the same phase. To make sure a CBUF_MAPSIZE range is
732  * covered, call this for both ends:
733  *	dump_set_used(base)
734  *	dump_set_used(base+CBUF_MAPNP-1)
735  *
736  * This is used during a panic dump to mark pages allocated by
737  * dumpsys_get_maxmem(). The macro IS_DUMP_PAGE(pp) is used by
738  * page_get_mnode_freelist() to make sure pages used by dump are never
739  * allocated.
740  */
741 #define	CBUF_MAPP2R(pfn)	((pfn) >> (CBUF_MAPSHIFT - PAGESHIFT))
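
/*
 * With 4K pages (PAGESHIFT 12) and CBUF_MAPSHIFT 22, CBUF_MAPP2R
 * shifts a bitnum right by 10, so each bit in rbitmap stands for
 * one 1024-page (4MB) range.
 */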
742 
743 static void
744 dump_set_used(pfn_t pfn)
745 {
746 
747 	pgcnt_t bitnum, rbitnum;
748 
749 	bitnum = dump_pfn_to_bitnum(pfn);
750 	ASSERT(bitnum != (pgcnt_t)-1);
751 
752 	rbitnum = CBUF_MAPP2R(bitnum);
753 	ASSERT(rbitnum < dumpcfg.rbitmapsize);
754 
755 	BT_SET(dumpcfg.rbitmap, rbitnum);
756 }
757 
758 int
759 dump_test_used(pfn_t pfn)
760 {
761 	pgcnt_t bitnum, rbitnum;
762 
763 	bitnum = dump_pfn_to_bitnum(pfn);
764 	ASSERT(bitnum != (pgcnt_t)-1);
765 
766 	rbitnum = CBUF_MAPP2R(bitnum);
767 	ASSERT(rbitnum < dumpcfg.rbitmapsize);
768 
769 	return (BT_TEST(dumpcfg.rbitmap, rbitnum));
770 }
771 
772 /*
773  * dumpbzalloc and dumpbzfree are callbacks from the bzip2 library.
774  * dumpsys_get_maxmem() uses them for BZ2_bzCompressInit().
775  */
776 static void *
777 dumpbzalloc(void *opaque, int items, int size)
778 {
779 	size_t *sz;
780 	char *ret;
781 
782 	ASSERT(opaque != NULL);
783 	sz = opaque;
784 	ret = dumpcfg.maxvm + *sz;
785 	*sz += items * size;
786 	*sz = P2ROUNDUP(*sz, BZ2_BZALLOC_ALIGN);
787 	ASSERT(*sz <= dumpcfg.maxvmsize);
788 	return (ret);
789 }
790 
791 /*ARGSUSED*/
792 static void
793 dumpbzfree(void *opaque, void *addr)
794 {
795 }
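
/*
 * Note that dumpbzalloc is a simple bump allocator over the reserved
 * maxvm region: opaque points at a size_t byte offset that only ever
 * grows, so dumpbzfree is intentionally empty. bzip2 state is never
 * freed piecemeal; the whole region is released when the dump
 * configuration changes.
 */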
796 
797 /*
798  * Perform additional checks on the page to see if we can really use
799  * it. The kernel (kas) pages are always set in the bitmap. However,
800  * boot memory pages (prom_ppages or P_BOOTPAGES) are not in the
801  * bitmap. So we check for them.
802  */
803 static inline int
804 dump_pfn_check(pfn_t pfn)
805 {
806 	page_t *pp = page_numtopp_nolock(pfn);
807 #if defined(__sparc)
808 	extern struct vnode prom_ppages;
809 #endif
810 
811 	if (pp == NULL || pp->p_pagenum != pfn ||
812 #if defined(__sparc)
813 	    pp->p_vnode == &prom_ppages ||
814 #else
815 	    PP_ISBOOTPAGES(pp) ||
816 #endif
817 	    pp->p_toxic != 0)
818 		return (0);
819 	return (1);
820 }
821 
822 /*
823  * Check a range to see if all contained pages are available and
824  * return non-zero if the range can be used.
825  */
826 static inline int
827 dump_range_check(pgcnt_t start, pgcnt_t end, pfn_t pfn)
828 {
829 	for (; start < end; start++, pfn++) {
830 		if (BT_TEST(dumpcfg.bitmap, start))
831 			return (0);
832 		if (!dump_pfn_check(pfn))
833 			return (0);
834 	}
835 	return (1);
836 }
837 
838 /*
839  * dumpsys_get_maxmem() is called during panic. Find unused ranges
840  * and use them for buffers. If we find enough memory, switch to
841  * parallel bzip2, otherwise use parallel lzjb.
842  *
843  * It searches the dump bitmap in 2 passes. The first time it looks
844  * for CBUF_MAPSIZE ranges. On the second pass it uses small pages.
845  */
846 static void
847 dumpsys_get_maxmem()
848 {
849 	dumpcfg_t *cfg = &dumpcfg;
850 	cbuf_t *endcp = &cfg->cbuf[cfg->ncbuf];
851 	helper_t *endhp = &cfg->helper[cfg->nhelper];
852 	pgcnt_t bitnum, end;
853 	size_t sz, endsz, bz2size;
854 	pfn_t pfn, off;
855 	cbuf_t *cp;
856 	helper_t *hp, *ohp;
857 	dumpmlw_t mlw;
858 	int k;
859 
860 	if (cfg->maxsize == 0 || cfg->clevel < DUMP_CLEVEL_LZJB ||
861 	    (dump_conflags & DUMP_ALL) != 0)
862 		return;
863 
864 	sz = 0;
865 	cfg->found4m = 0;
866 	cfg->foundsm = 0;
867 
868 	/* bitmap of ranges used to estimate which pfns are being used */
869 	bzero(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.rbitmapsize));
870 
871 	/* find ranges that are not being dumped to use for buffers */
872 	dump_init_memlist_walker(&mlw);
873 	for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
874 		dump_timeleft = dump_timeout;
875 		end = bitnum + CBUF_MAPNP;
876 		pfn = dump_bitnum_to_pfn(bitnum, &mlw);
877 		ASSERT(pfn != PFN_INVALID);
878 
879 		/* skip partial range at end of mem segment */
880 		if (mlw.mpleft < CBUF_MAPNP) {
881 			end = bitnum + mlw.mpleft;
882 			continue;
883 		}
884 
885 		/* skip non aligned pages */
886 		off = P2PHASE(pfn, CBUF_MAPNP);
887 		if (off != 0) {
888 			end -= off;
889 			continue;
890 		}
891 
892 		if (!dump_range_check(bitnum, end, pfn))
893 			continue;
894 
895 		ASSERT((sz + CBUF_MAPSIZE) <= cfg->maxvmsize);
896 		hat_devload(kas.a_hat, cfg->maxvm + sz, CBUF_MAPSIZE, pfn,
897 		    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
898 		sz += CBUF_MAPSIZE;
899 		cfg->found4m++;
900 
901 		/* set the bitmap for both ends to be sure to cover the range */
902 		dump_set_used(pfn);
903 		dump_set_used(pfn + CBUF_MAPNP - 1);
904 
905 		if (sz >= cfg->maxsize)
906 			goto foundmax;
907 	}
908 
909 	/* Add small pages if we can't find enough large pages. */
910 	dump_init_memlist_walker(&mlw);
911 	for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
912 		dump_timeleft = dump_timeout;
913 		end = bitnum + CBUF_MAPNP;
914 		pfn = dump_bitnum_to_pfn(bitnum, &mlw);
915 		ASSERT(pfn != PFN_INVALID);
916 
917 		/* Find any non-aligned pages at start and end of segment. */
918 		off = P2PHASE(pfn, CBUF_MAPNP);
919 		if (mlw.mpleft < CBUF_MAPNP) {
920 			end = bitnum + mlw.mpleft;
921 		} else if (off != 0) {
922 			end -= off;
923 		} else if (cfg->found4m && dump_test_used(pfn)) {
924 			continue;
925 		}
926 
927 		for (; bitnum < end; bitnum++, pfn++) {
928 			dump_timeleft = dump_timeout;
929 			if (BT_TEST(dumpcfg.bitmap, bitnum))
930 				continue;
931 			if (!dump_pfn_check(pfn))
932 				continue;
933 			ASSERT((sz + PAGESIZE) <= cfg->maxvmsize);
934 			hat_devload(kas.a_hat, cfg->maxvm + sz, PAGESIZE, pfn,
935 			    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
936 			sz += PAGESIZE;
937 			cfg->foundsm++;
938 			dump_set_used(pfn);
939 			if (sz >= cfg->maxsize)
940 				goto foundmax;
941 		}
942 	}
943 
944 	/* Fall back to lzjb if we did not get enough memory for bzip2. */
945 	endsz = (cfg->maxsize * cfg->threshold) / cfg->nhelper;
946 	if (sz < endsz) {
947 		cfg->clevel = DUMP_CLEVEL_LZJB;
948 	}
949 
950 	/* Allocate memory for as many helpers as we can. */
951 foundmax:
952 
953 	/* Byte offsets into memory found and mapped above */
954 	endsz = sz;
955 	sz = 0;
956 
957 	/* Set the size for bzip2 state. Only bzip2 needs it. */
958 	bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
959 
960 	/* Skip the preallocated output buffers. */
961 	cp = &cfg->cbuf[MINCBUFS];
962 
963 	/* Use this to move memory up from the preallocated helpers. */
964 	ohp = cfg->helper;
965 
966 	/* Loop over all helpers and allocate memory. */
967 	for (hp = cfg->helper; hp < endhp; hp++) {
968 
969 		/* Skip preallocated helpers by checking hp->page. */
970 		if (hp->page == NULL) {
971 			if (cfg->clevel <= DUMP_CLEVEL_LZJB) {
972 				/* lzjb needs 2 1-page buffers */
973 				if ((sz + (2 * PAGESIZE)) > endsz)
974 					break;
975 				hp->page = cfg->maxvm + sz;
976 				sz += PAGESIZE;
977 				hp->lzbuf = cfg->maxvm + sz;
978 				sz += PAGESIZE;
979 
980 			} else if (ohp->lzbuf != NULL) {
981 				/* re-use the preallocated lzjb page for bzip2 */
982 				hp->page = ohp->lzbuf;
983 				ohp->lzbuf = NULL;
984 				++ohp;
985 
986 			} else {
987 				/* bzip2 needs a 1-page buffer */
988 				if ((sz + PAGESIZE) > endsz)
989 					break;
990 				hp->page = cfg->maxvm + sz;
991 				sz += PAGESIZE;
992 			}
993 		}
994 
995 		/*
996 		 * Add output buffers per helper. The number of
997 		 * buffers per helper is determined by the ratio of
998 		 * ncbuf to nhelper.
999 		 */
1000 		for (k = 0; cp < endcp && (sz + CBUF_SIZE) <= endsz &&
1001 		    k < NCBUF_PER_HELPER; k++) {
1002 			cp->state = CBUF_FREEBUF;
1003 			cp->size = CBUF_SIZE;
1004 			cp->buf = cfg->maxvm + sz;
1005 			sz += CBUF_SIZE;
1006 			++cp;
1007 		}
1008 
1009 		/*
1010 		 * bzip2 needs compression state. Use the dumpbzalloc
1011 		 * and dumpbzfree callbacks to allocate the memory.
1012 		 * bzip2 does allocation only at init time.
1013 		 */
1014 		if (cfg->clevel >= DUMP_CLEVEL_BZIP2) {
1015 			if ((sz + bz2size) > endsz) {
1016 				hp->page = NULL;
1017 				break;
1018 			} else {
1019 				hp->bzstream.opaque = &sz;
1020 				hp->bzstream.bzalloc = dumpbzalloc;
1021 				hp->bzstream.bzfree = dumpbzfree;
1022 				(void) BZ2_bzCompressInit(&hp->bzstream,
1023 				    dump_bzip2_level, 0, 0);
1024 				hp->bzstream.opaque = NULL;
1025 			}
1026 		}
1027 	}
1028 
1029 	/* Finish allocating output buffers */
1030 	for (; cp < endcp && (sz + CBUF_SIZE) <= endsz; cp++) {
1031 		cp->state = CBUF_FREEBUF;
1032 		cp->size = CBUF_SIZE;
1033 		cp->buf = cfg->maxvm + sz;
1034 		sz += CBUF_SIZE;
1035 	}
1036 
1037 	/* Enable IS_DUMP_PAGE macro, which checks for pages we took. */
1038 	if (cfg->found4m || cfg->foundsm)
1039 		dump_check_used = 1;
1040 
1041 	ASSERT(sz <= endsz);
1042 }
1043 
1044 static void
1045 dumphdr_init(void)
1046 {
1047 	pgcnt_t npages = 0;
1048 
1049 	ASSERT(MUTEX_HELD(&dump_lock));
1050 
1051 	if (dumphdr == NULL) {
1052 		dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP);
1053 		dumphdr->dump_magic = DUMP_MAGIC;
1054 		dumphdr->dump_version = DUMP_VERSION;
1055 		dumphdr->dump_wordsize = DUMP_WORDSIZE;
1056 		dumphdr->dump_pageshift = PAGESHIFT;
1057 		dumphdr->dump_pagesize = PAGESIZE;
1058 		dumphdr->dump_utsname = utsname;
1059 		(void) strcpy(dumphdr->dump_platform, platform);
1060 		dumpbuf.size = dumpbuf_iosize(maxphys);
1061 		dumpbuf.start = kmem_alloc(dumpbuf.size, KM_SLEEP);
1062 		dumpbuf.end = dumpbuf.start + dumpbuf.size;
1063 		dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP);
1064 		dumpcfg.helpermap = kmem_zalloc(BT_SIZEOFMAP(NCPU), KM_SLEEP);
1065 		LOCK_INIT_HELD(&dumpcfg.helper_lock);
1066 	}
1067 
1068 	npages = num_phys_pages();
1069 
1070 	if (dumpcfg.bitmapsize != npages) {
1071 		size_t rlen = CBUF_MAPP2R(P2ROUNDUP(npages, CBUF_MAPNP));
1072 		void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP);
1073 		void *rmap = kmem_alloc(BT_SIZEOFMAP(rlen), KM_SLEEP);
1074 
1075 		if (dumpcfg.bitmap != NULL)
1076 			kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.
1077 			    bitmapsize));
1078 		if (dumpcfg.rbitmap != NULL)
1079 			kmem_free(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.
1080 			    rbitmapsize));
1081 		dumpcfg.bitmap = map;
1082 		dumpcfg.bitmapsize = npages;
1083 		dumpcfg.rbitmap = rmap;
1084 		dumpcfg.rbitmapsize = rlen;
1085 	}
1086 }
1087 
1088 /*
1089  * Establish a new dump device.
1090  */
1091 int
1092 dumpinit(vnode_t *vp, char *name, int justchecking)
1093 {
1094 	vnode_t *cvp;
1095 	vattr_t vattr;
1096 	vnode_t *cdev_vp;
1097 	int error = 0;
1098 
1099 	ASSERT(MUTEX_HELD(&dump_lock));
1100 
1101 	dumphdr_init();
1102 
1103 	cvp = common_specvp(vp);
1104 	if (cvp == dumpvp)
1105 		return (0);
1106 
1107 	/*
1108 	 * Determine whether this is a plausible dump device.  We want either:
1109 	 * (1) a real device that's not mounted and has a cb_dump routine, or
1110 	 * (2) a swapfile on some filesystem that has a vop_dump routine.
1111 	 */
1112 	if ((error = VOP_OPEN(&cvp, FREAD | FWRITE, kcred, NULL)) != 0)
1113 		return (error);
1114 
1115 	vattr.va_mask = AT_SIZE | AT_TYPE | AT_RDEV;
1116 	if ((error = VOP_GETATTR(cvp, &vattr, 0, kcred, NULL)) == 0) {
1117 		if (vattr.va_type == VBLK || vattr.va_type == VCHR) {
1118 			if (devopsp[getmajor(vattr.va_rdev)]->
1119 			    devo_cb_ops->cb_dump == nodev)
1120 				error = ENOTSUP;
1121 			else if (vfs_devismounted(vattr.va_rdev))
1122 				error = EBUSY;
1123 			if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip),
1124 			    ZFS_DRIVER) == 0 &&
1125 			    IS_SWAPVP(common_specvp(cvp)))
1126 				error = EBUSY;
1127 		} else {
1128 			if (vn_matchopval(cvp, VOPNAME_DUMP, fs_nosys) ||
1129 			    !IS_SWAPVP(cvp))
1130 				error = ENOTSUP;
1131 		}
1132 	}
1133 
1134 	if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE)
1135 		error = ENOSPC;
1136 
1137 	if (error || justchecking) {
1138 		(void) VOP_CLOSE(cvp, FREAD | FWRITE, 1, (offset_t)0,
1139 		    kcred, NULL);
1140 		return (error);
1141 	}
1142 
1143 	VN_HOLD(cvp);
1144 
1145 	if (dumpvp != NULL)
1146 		dumpfini();	/* unconfigure the old dump device */
1147 
1148 	dumpvp = cvp;
1149 	dumpvp_size = vattr.va_size & -DUMP_OFFSET;
1150 	dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP);
1151 	(void) strcpy(dumppath, name);
1152 	dumpbuf.iosize = 0;
1153 
1154 	/*
1155 	 * If the dump device is a block device, attempt to open up the
1156 	 * corresponding character device and determine its maximum transfer
1157 	 * size.  We use this information to potentially resize dumpbuf to a
1158 	 * larger and more optimal size for performing i/o to the dump device.
1159 	 */
1160 	if (cvp->v_type == VBLK &&
1161 	    (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) {
1162 		if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1163 			size_t blk_size;
1164 			struct dk_cinfo dki = { 0 }; /* dki_dname read below */
1165 			struct dk_minfo minf;
1166 
1167 			if (VOP_IOCTL(cdev_vp, DKIOCGMEDIAINFO,
1168 			    (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL)
1169 			    == 0 && minf.dki_lbsize != 0)
1170 				blk_size = minf.dki_lbsize;
1171 			else
1172 				blk_size = DEV_BSIZE;
1173 
1174 			if (VOP_IOCTL(cdev_vp, DKIOCINFO, (intptr_t)&dki,
1175 			    FKIOCTL, kcred, NULL, NULL) == 0) {
1176 				dumpbuf.iosize = dki.dki_maxtransfer * blk_size;
1177 				dumpbuf_resize();
1178 			}
1179 			/*
1180 			 * If we are working with a zvol then dumpify it
1181 			 * if it's not being used as swap.
1182 			 */
1183 			if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) {
1184 				if (IS_SWAPVP(common_specvp(cvp)))
1185 					error = EBUSY;
1186 				else if ((error = VOP_IOCTL(cdev_vp,
1187 				    DKIOCDUMPINIT, NULL, FKIOCTL, kcred,
1188 				    NULL, NULL)) != 0)
1189 					dumpfini();
1190 			}
1191 
1192 			(void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1193 			    kcred, NULL);
1194 		}
1195 
1196 		VN_RELE(cdev_vp);
1197 	}
1198 
1199 	cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20);
1200 
1201 	dump_update_clevel();
1202 
1203 	return (error);
1204 }
1205 
1206 void
1207 dumpfini(void)
1208 {
1209 	vattr_t vattr;
1210 	boolean_t is_zfs = B_FALSE;
1211 	vnode_t *cdev_vp;
1212 	ASSERT(MUTEX_HELD(&dump_lock));
1213 
1214 	kmem_free(dumppath, strlen(dumppath) + 1);
1215 
1216 	/*
1217 	 * Determine if we are using zvols for our dump device
1218 	 */
1219 	vattr.va_mask = AT_RDEV;
1220 	if (VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL) == 0) {
1221 		is_zfs = (getmajor(vattr.va_rdev) ==
1222 		    ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE;
1223 	}
1224 
1225 	/*
1226 	 * If we have a zvol dump device then we call into zfs so
1227 	 * that it may have a chance to cleanup.
1228 	 */
1229 	if (is_zfs &&
1230 	    (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) {
1231 		if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1232 			(void) VOP_IOCTL(cdev_vp, DKIOCDUMPFINI, NULL, FKIOCTL,
1233 			    kcred, NULL, NULL);
1234 			(void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1235 			    kcred, NULL);
1236 		}
1237 		VN_RELE(cdev_vp);
1238 	}
1239 
1240 	(void) VOP_CLOSE(dumpvp, FREAD | FWRITE, 1, (offset_t)0, kcred, NULL);
1241 
1242 	VN_RELE(dumpvp);
1243 
1244 	dumpvp = NULL;
1245 	dumpvp_size = 0;
1246 	dumppath = NULL;
1247 }
1248 
1249 static offset_t
1250 dumpvp_flush(void)
1251 {
1252 	size_t size = P2ROUNDUP(dumpbuf.cur - dumpbuf.start, PAGESIZE);
1253 	hrtime_t iotime;
1254 	int err;
1255 
1256 	if (dumpbuf.vp_off + size > dumpbuf.vp_limit) {
1257 		dump_ioerr = ENOSPC;
1258 		dumpbuf.vp_off = dumpbuf.vp_limit;
1259 	} else if (size != 0) {
1260 		iotime = gethrtime();
1261 		dumpsync.iowait += iotime - dumpsync.iowaitts;
1262 		if (panicstr)
1263 			err = VOP_DUMP(dumpvp, dumpbuf.start,
1264 			    lbtodb(dumpbuf.vp_off), btod(size), NULL);
1265 		else
1266 			err = vn_rdwr(UIO_WRITE, dumpbuf.cdev_vp != NULL ?
1267 			    dumpbuf.cdev_vp : dumpvp, dumpbuf.start, size,
1268 			    dumpbuf.vp_off, UIO_SYSSPACE, 0, dumpbuf.vp_limit,
1269 			    kcred, 0);
1270 		if (err && dump_ioerr == 0)
1271 			dump_ioerr = err;
1272 		dumpsync.iowaitts = gethrtime();
1273 		dumpsync.iotime += dumpsync.iowaitts - iotime;
1274 		dumpsync.nwrite += size;
1275 		dumpbuf.vp_off += size;
1276 	}
1277 	dumpbuf.cur = dumpbuf.start;
1278 	dump_timeleft = dump_timeout;
1279 	return (dumpbuf.vp_off);
1280 }
1281 
1282 /* maximize write speed by keeping seek offset aligned with size */
1283 void
1284 dumpvp_write(const void *va, size_t size)
1285 {
1286 	size_t len, off, sz;
1287 
1288 	while (size != 0) {
1289 		len = MIN(size, dumpbuf.end - dumpbuf.cur);
1290 		if (len == 0) {
1291 			off = P2PHASE(dumpbuf.vp_off, dumpbuf.size);
1292 			if (off == 0 || !ISP2(dumpbuf.size)) {
1293 				(void) dumpvp_flush();
1294 			} else {
1295 				sz = dumpbuf.size - off;
1296 				dumpbuf.cur = dumpbuf.start + sz;
1297 				(void) dumpvp_flush();
1298 				ovbcopy(dumpbuf.start + sz, dumpbuf.start, off);
1299 				dumpbuf.cur += off;
1300 			}
1301 		} else {
1302 			bcopy(va, dumpbuf.cur, len);
1303 			va = (char *)va + len;
1304 			dumpbuf.cur += len;
1305 			size -= len;
1306 		}
1307 	}
1308 }
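
/*
 * For example, with a 1MB dumpbuf whose vp_off is 256K past a 1MB
 * boundary, a full buffer is flushed as a 768K write (bringing
 * vp_off back into alignment) and the remaining 256K is slid to the
 * front of the buffer to go out with the next flush.
 */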
1309 
1310 /*ARGSUSED*/
1311 static void
1312 dumpvp_ksyms_write(const void *src, void *dst, size_t size)
1313 {
1314 	dumpvp_write(src, size);
1315 }
1316 
1317 /*
1318  * Mark 'pfn' in the bitmap and dump its translation table entry.
1319  */
1320 void
1321 dump_addpage(struct as *as, void *va, pfn_t pfn)
1322 {
1323 	mem_vtop_t mem_vtop;
1324 	pgcnt_t bitnum;
1325 
1326 	if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1327 		if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1328 			dumphdr->dump_npages++;
1329 			BT_SET(dumpcfg.bitmap, bitnum);
1330 		}
1331 		dumphdr->dump_nvtop++;
1332 		mem_vtop.m_as = as;
1333 		mem_vtop.m_va = va;
1334 		mem_vtop.m_pfn = pfn;
1335 		dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
1336 	}
1337 	dump_timeleft = dump_timeout;
1338 }
1339 
1340 /*
1341  * Mark 'pfn' in the bitmap
1342  */
1343 void
1344 dump_page(pfn_t pfn)
1345 {
1346 	pgcnt_t bitnum;
1347 
1348 	if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1349 		if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1350 			dumphdr->dump_npages++;
1351 			BT_SET(dumpcfg.bitmap, bitnum);
1352 		}
1353 	}
1354 	dump_timeleft = dump_timeout;
1355 }
1356 
1357 /*
1358  * Dump the <as, va, pfn> information for a given address space.
1359  * SEGOP_DUMP() will call dump_addpage() for each page in the segment.
1360  */
1361 static void
1362 dump_as(struct as *as)
1363 {
1364 	struct seg *seg;
1365 
1366 	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1367 	for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
1368 		if (seg->s_as != as)
1369 			break;
1370 		if (seg->s_ops == NULL)
1371 			continue;
1372 		SEGOP_DUMP(seg);
1373 	}
1374 	AS_LOCK_EXIT(as, &as->a_lock);
1375 
1376 	if (seg != NULL)
1377 		cmn_err(CE_WARN, "invalid segment %p in address space %p",
1378 		    (void *)seg, (void *)as);
1379 }
1380 
1381 static int
1382 dump_process(pid_t pid)
1383 {
1384 	proc_t *p = sprlock(pid);
1385 
1386 	if (p == NULL)
1387 		return (-1);
1388 	if (p->p_as != &kas) {
1389 		mutex_exit(&p->p_lock);
1390 		dump_as(p->p_as);
1391 		mutex_enter(&p->p_lock);
1392 	}
1393 
1394 	sprunlock(p);
1395 
1396 	return (0);
1397 }
1398 
1399 void
1400 dump_ereports(void)
1401 {
1402 	u_offset_t dumpvp_start;
1403 	erpt_dump_t ed;
1404 
1405 	if (dumpvp == NULL || dumphdr == NULL)
1406 		return;
1407 
1408 	dumpbuf.cur = dumpbuf.start;
1409 	dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE);
1410 	dumpvp_start = dumpbuf.vp_limit - DUMP_ERPTSIZE;
1411 	dumpbuf.vp_off = dumpvp_start;
1412 
1413 	fm_ereport_dump();
1414 	if (panicstr)
1415 		errorq_dump();
1416 
1417 	bzero(&ed, sizeof (ed)); /* indicate end of ereports */
1418 	dumpvp_write(&ed, sizeof (ed));
1419 	(void) dumpvp_flush();
1420 
1421 	if (!panicstr) {
1422 		(void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1423 		    (size_t)(dumpbuf.vp_off - dumpvp_start),
1424 		    B_INVAL | B_FORCE, kcred, NULL);
1425 	}
1426 }
1427 
1428 void
1429 dump_messages(void)
1430 {
1431 	log_dump_t ld;
1432 	mblk_t *mctl, *mdata;
1433 	queue_t *q, *qlast;
1434 	u_offset_t dumpvp_start;
1435 
1436 	if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL)
1437 		return;
1438 
1439 	dumpbuf.cur = dumpbuf.start;
1440 	dumpbuf.vp_limit = dumpvp_size - DUMP_OFFSET;
1441 	dumpvp_start = dumpbuf.vp_limit - DUMP_LOGSIZE;
1442 	dumpbuf.vp_off = dumpvp_start;
1443 
1444 	qlast = NULL;
1445 	do {
1446 		for (q = log_consq; q->q_next != qlast; q = q->q_next)
1447 			continue;
1448 		for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) {
1449 			dump_timeleft = dump_timeout;
1450 			mdata = mctl->b_cont;
1451 			ld.ld_magic = LOG_MAGIC;
1452 			ld.ld_msgsize = MBLKL(mctl->b_cont);
1453 			ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl));
1454 			ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata));
1455 			dumpvp_write(&ld, sizeof (ld));
1456 			dumpvp_write(mctl->b_rptr, MBLKL(mctl));
1457 			dumpvp_write(mdata->b_rptr, MBLKL(mdata));
1458 		}
1459 	} while ((qlast = q) != log_consq);
1460 
1461 	ld.ld_magic = 0;		/* indicate end of messages */
1462 	dumpvp_write(&ld, sizeof (ld));
1463 	(void) dumpvp_flush();
1464 	if (!panicstr) {
1465 		(void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1466 		    (size_t)(dumpbuf.vp_off - dumpvp_start),
1467 		    B_INVAL | B_FORCE, kcred, NULL);
1468 	}
1469 }
1470 
1471 /*
1472  * The following functions are called on multiple CPUs during dump.
1473  * They must not use most kernel services, because all cross-calls are
1474  * disabled during panic. Therefore, blocking locks and cache flushes
1475  * will not work.
1476  */
1477 
1478 static int
1479 dump_pagecopy(void *src, void *dst)
1480 {
1481 	long *wsrc = (long *)src;
1482 	long *wdst = (long *)dst;
1483 	const ulong_t ncopies = PAGESIZE / sizeof (long);
1484 	volatile int w = 0;
1485 	volatile int ueoff = -1;
1486 	on_trap_data_t otd;
1487 
1488 	if (on_trap(&otd, OT_DATA_EC)) {
1489 		if (ueoff == -1)
1490 			ueoff = w * sizeof (long);
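		/* overwrite the word that took the UE with a marker pattern */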
1491 #ifdef _LP64
1492 		wdst[w++] = 0xbadecc00badecc;
1493 #else
1494 		wdst[w++] = 0xbadecc;
1495 #endif
1496 	}
1497 	while (w < ncopies) {
1498 		wdst[w] = wsrc[w];
1499 		w++;
1500 	}
1501 	no_trap();
1502 	return (ueoff);
1503 }
1504 
1505 static void
1506 dumpsys_close_cq(cqueue_t *cq, int live)
1507 {
1508 	if (live) {
1509 		mutex_enter(&cq->mutex);
1510 		atomic_dec_uint(&cq->open);
1511 		cv_signal(&cq->cv);
1512 		mutex_exit(&cq->mutex);
1513 	} else {
1514 		atomic_dec_uint(&cq->open);
1515 	}
1516 }
1517 
1518 static inline void
1519 dumpsys_spinlock(lock_t *lp)
1520 {
1521 	uint_t backoff = 0;
1522 	int loop_count = 0;
1523 
1524 	while (LOCK_HELD(lp) || !lock_spin_try(lp)) {
1525 		if (++loop_count >= ncpus) {
1526 			backoff = mutex_lock_backoff(0);
1527 			loop_count = 0;
1528 		} else {
1529 			backoff = mutex_lock_backoff(backoff);
1530 		}
1531 		mutex_lock_delay(backoff);
1532 	}
1533 }
1534 
1535 static inline void
1536 dumpsys_spinunlock(lock_t *lp)
1537 {
1538 	lock_clear(lp);
1539 }
1540 
1541 static inline void
1542 dumpsys_lock(cqueue_t *cq, int live)
1543 {
1544 	if (live)
1545 		mutex_enter(&cq->mutex);
1546 	else
1547 		dumpsys_spinlock(&cq->spinlock);
1548 }
1549 
1550 static inline void
1551 dumpsys_unlock(cqueue_t *cq, int live, int signal)
1552 {
1553 	if (live) {
1554 		if (signal)
1555 			cv_signal(&cq->cv);
1556 		mutex_exit(&cq->mutex);
1557 	} else {
1558 		dumpsys_spinunlock(&cq->spinlock);
1559 	}
1560 }
1561 
1562 static void
1563 dumpsys_wait_cq(cqueue_t *cq, int live)
1564 {
1565 	if (live) {
1566 		cv_wait(&cq->cv, &cq->mutex);
1567 	} else {
1568 		dumpsys_spinunlock(&cq->spinlock);
1569 		while (cq->open)
1570 			if (cq->first)
1571 				break;
1572 		dumpsys_spinlock(&cq->spinlock);
1573 	}
1574 }
1575 
1576 static void
1577 dumpsys_put_cq(cqueue_t *cq, cbuf_t *cp, int newstate, int live)
1578 {
1579 	if (cp == NULL)
1580 		return;
1581 
1582 	dumpsys_lock(cq, live);
1583 
1584 	if (cq->ts != 0) {
1585 		cq->empty += gethrtime() - cq->ts;
1586 		cq->ts = 0;
1587 	}
1588 
1589 	cp->state = newstate;
1590 	cp->next = NULL;
1591 	if (cq->last == NULL)
1592 		cq->first = cp;
1593 	else
1594 		cq->last->next = cp;
1595 	cq->last = cp;
1596 
1597 	dumpsys_unlock(cq, live, 1);
1598 }
1599 
1600 static cbuf_t *
1601 dumpsys_get_cq(cqueue_t *cq, int live)
1602 {
1603 	cbuf_t *cp;
1604 	hrtime_t now = gethrtime();
1605 
1606 	dumpsys_lock(cq, live);
1607 
1608 	/* CONSTCOND */
1609 	while (1) {
1610 		cp = (cbuf_t *)cq->first;
1611 		if (cp == NULL) {
1612 			if (cq->open == 0)
1613 				break;
1614 			dumpsys_wait_cq(cq, live);
1615 			continue;
1616 		}
1617 		cq->first = cp->next;
1618 		if (cq->first == NULL) {
1619 			cq->last = NULL;
1620 			cq->ts = now;
1621 		}
1622 		break;
1623 	}
1624 
1625 	dumpsys_unlock(cq, live, cq->first != NULL || cq->open == 0);
1626 	return (cp);
1627 }
1628 
1629 /*
1630  * Send an error message to the console. If the main task is running,
1631  * just write the message via uprintf. If a helper is running, the
1632  * message has to be put on a queue for the main task. Setting fmt to
1633  * NULL means flush the error message buffer. If fmt is not NULL, just
1634  * add the text to the existing buffer.
1635  */
1636 static void
1637 dumpsys_errmsg(helper_t *hp, const char *fmt, ...)
1638 {
1639 	dumpsync_t *ds = hp->ds;
1640 	cbuf_t *cp = hp->cperr;
1641 	va_list adx;
1642 
1643 	if (hp->helper == MAINHELPER) {
1644 		if (fmt != NULL) {
1645 			if (ds->neednl) {
1646 				uprintf("\n");
1647 				ds->neednl = 0;
1648 			}
1649 			va_start(adx, fmt);
1650 			vuprintf(fmt, adx);
1651 			va_end(adx);
1652 		}
1653 	} else if (fmt == NULL) {
1654 		if (cp != NULL) {
1655 			CQ_PUT(mainq, cp, CBUF_ERRMSG);
1656 			hp->cperr = NULL;
1657 		}
1658 	} else {
1659 		if (hp->cperr == NULL) {
1660 			cp = CQ_GET(freebufq);
1661 			hp->cperr = cp;
1662 			cp->used = 0;
1663 		}
1664 		va_start(adx, fmt);
1665 		cp->used += vsnprintf(cp->buf + cp->used, cp->size - cp->used,
1666 		    fmt, adx);
1667 		va_end(adx);
1668 		if ((cp->used + LOG_MSGSIZE) > cp->size) {
1669 			CQ_PUT(mainq, cp, CBUF_ERRMSG);
1670 			hp->cperr = NULL;
1671 		}
1672 	}
1673 }
1674 
1675 /*
1676  * Write an output buffer to the dump file. If the main task is
1677  * running, just write the data. If a helper is running, the output
1678  * is placed on a queue for the main task.
1679  */
1680 static void
1681 dumpsys_swrite(helper_t *hp, cbuf_t *cp, size_t used)
1682 {
1683 	dumpsync_t *ds = hp->ds;
1684 
1685 	if (hp->helper == MAINHELPER) {
1686 		HRSTART(ds->perpage, write);
1687 		dumpvp_write(cp->buf, used);
1688 		HRSTOP(ds->perpage, write);
1689 		CQ_PUT(freebufq, cp, CBUF_FREEBUF);
1690 	} else {
1691 		cp->used = used;
1692 		CQ_PUT(mainq, cp, CBUF_WRITE);
1693 	}
1694 }
1695 
1696 /*
1697  * Copy one page within the mapped range. The offset starts at 0 and
1698  * is relative to the first pfn. cp->buf + cp->off is the address of
1699  * the first pfn. If dump_pagecopy returns a UE offset, create an
1700  * error message.  Returns the offset to the next pfn in the range
1701  * selected by the bitmap.
1702  */
1703 static int
1704 dumpsys_copy_page(helper_t *hp, int offset)
1705 {
1706 	cbuf_t *cp = hp->cpin;
1707 	int ueoff;
1708 
1709 	ASSERT(cp->off + offset + PAGESIZE <= cp->size);
1710 	ASSERT(BT_TEST(dumpcfg.bitmap, cp->bitnum));
1711 
1712 	ueoff = dump_pagecopy(cp->buf + cp->off + offset, hp->page);
1713 
1714 	/* ueoff is the offset in the page to a UE error */
1715 	if (ueoff != -1) {
1716 		uint64_t pa = ptob(cp->pfn) + offset + ueoff;
1717 
1718 		dumpsys_errmsg(hp, "memory error at PA 0x%08x.%08x\n",
1719 		    (uint32_t)(pa >> 32), (uint32_t)pa);
1720 	}
1721 
1722 	/*
1723 	 * Advance bitnum and offset to the next input page for the
1724 	 * next call to this function.
1725 	 */
1726 	offset += PAGESIZE;
1727 	cp->bitnum++;
1728 	while (cp->off + offset < cp->size) {
1729 		if (BT_TEST(dumpcfg.bitmap, cp->bitnum))
1730 			break;
1731 		offset += PAGESIZE;
1732 		cp->bitnum++;
1733 	}
1734 
1735 	return (offset);
1736 }
1737 
1738 /*
1739  * Read the helper queue, and copy one mapped page. Return 0 when
1740  * done. Return 1 when a page has been copied into hp->page.
1741  */
1742 static int
1743 dumpsys_sread(helper_t *hp)
1744 {
1745 	dumpsync_t *ds = hp->ds;
1746 
1747 	/* CONSTCOND */
1748 	while (1) {
1749 
1750 		/* Find the next input buffer. */
1751 		if (hp->cpin == NULL) {
1752 			HRSTART(hp->perpage, inwait);
1753 
1754 			/* CONSTCOND */
1755 			while (1) {
1756 				hp->cpin = CQ_GET(helperq);
1757 				dump_timeleft = dump_timeout;
1758 
1759 				/*
1760 				 * NULL return means the helper queue
1761 				 * is closed and empty.
1762 				 */
1763 				if (hp->cpin == NULL)
1764 					break;
1765 
1766 				/* Have input, check for dump I/O error. */
1767 				if (!dump_ioerr)
1768 					break;
1769 
1770 				/*
1771 				 * If an I/O error occurs, stay in the
1772 				 * loop in order to empty the helper
1773 				 * queue. Return each buffer to the
1774 				 * main task to unmap and free it.
1775 				 */
1776 				hp->cpin->used = 0;
1777 				CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1778 			}
1779 			HRSTOP(hp->perpage, inwait);
1780 
1781 			/* Stop here when the helper queue is closed. */
1782 			if (hp->cpin == NULL)
1783 				break;
1784 
1785 			/* Set the offset to 0 to get the first pfn. */
1786 			hp->in = 0;
1787 
1788 			/* Set the total processed to 0 */
1789 			hp->used = 0;
1790 		}
1791 
1792 		/* Process the next page. */
1793 		if (hp->used < hp->cpin->used) {
1794 
1795 			/*
1796 			 * Get the next page from the input buffer and
1797 			 * return a copy.
1798 			 */
1799 			ASSERT(hp->in != -1);
1800 			HRSTART(hp->perpage, copy);
1801 			hp->in = dumpsys_copy_page(hp, hp->in);
1802 			hp->used += PAGESIZE;
1803 			HRSTOP(hp->perpage, copy);
1804 			break;
1805 
1806 		} else {
1807 
1808 			/*
1809 			 * Done with the input. Flush the VM and
1810 			 * return the buffer to the main task.
1811 			 */
1812 			if (panicstr && hp->helper != MAINHELPER)
1813 				hat_flush_range(kas.a_hat,
1814 				    hp->cpin->buf, hp->cpin->size);
1815 			dumpsys_errmsg(hp, NULL);
1816 			CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1817 			hp->cpin = NULL;
1818 		}
1819 	}
1820 
1821 	return (hp->cpin != NULL);
1822 }
1823 
1824 /*
1825  * Compress size bytes starting at buf with bzip2.
1826  * mode:
1827  *	BZ_RUN		compress one more page of input
1828  *	BZ_FINISH	no more input; flush the stream state
1829  */
1830 static void
1831 dumpsys_bzrun(helper_t *hp, void *buf, size_t size, int mode)
1832 {
1833 	dumpsync_t *ds = hp->ds;
1834 	const int CSIZE = sizeof (dumpcsize_t);
1835 	bz_stream *ps = &hp->bzstream;
1836 	int rc = 0;
1837 	uint32_t csize;
1838 	dumpcsize_t cs;
1839 
1840 	/* Set input pointers to new input page */
1841 	if (size > 0) {
1842 		ps->avail_in = size;
1843 		ps->next_in = buf;
1844 	}
1845 
1846 	/* CONSTCOND */
1847 	while (1) {
1848 
1849 		/* Quit when all input has been consumed */
1850 		if (ps->avail_in == 0 && mode == BZ_RUN)
1851 			break;
1852 
1853 		/* Get a new output buffer */
1854 		if (hp->cpout == NULL) {
1855 			HRSTART(hp->perpage, outwait);
1856 			hp->cpout = CQ_GET(freebufq);
1857 			HRSTOP(hp->perpage, outwait);
1858 			ps->avail_out = hp->cpout->size - CSIZE;
1859 			ps->next_out = hp->cpout->buf + CSIZE;
1860 		}
1861 
1862 		/* Compress input, or finalize */
1863 		HRSTART(hp->perpage, compress);
1864 		rc = BZ2_bzCompress(ps, mode);
1865 		HRSTOP(hp->perpage, compress);
1866 
1867 		/* Check for error */
1868 		if (mode == BZ_RUN && rc != BZ_RUN_OK) {
1869 			dumpsys_errmsg(hp, "%d: BZ_RUN error %s at page %lx\n",
1870 			    hp->helper, BZ2_bzErrorString(rc),
1871 			    hp->cpin->pagenum);
1872 			break;
1873 		}
1874 
1875 		/* Write the buffer if it is full, or we are flushing */
1876 		if (ps->avail_out == 0 || mode == BZ_FINISH) {
1877 			csize = hp->cpout->size - CSIZE - ps->avail_out;
1878 			cs = DUMP_SET_TAG(csize, hp->tag);
1879 			if (csize > 0) {
1880 				(void) memcpy(hp->cpout->buf, &cs, CSIZE);
1881 				dumpsys_swrite(hp, hp->cpout, csize + CSIZE);
1882 				hp->cpout = NULL;
1883 			}
1884 		}
1885 
1886 		/* Check for final complete */
1887 		if (mode == BZ_FINISH) {
1888 			if (rc == BZ_STREAM_END)
1889 				break;
1890 			if (rc != BZ_FINISH_OK) {
1891 				dumpsys_errmsg(hp, "%d: BZ_FINISH error %s\n",
1892 				    hp->helper, BZ2_bzErrorString(rc));
1893 				break;
1894 			}
1895 		}
1896 	}
1897 
1898 	/* Cleanup state and buffers */
1899 	if (mode == BZ_FINISH) {
1900 
1901 		/* Reset state so that it is re-usable. */
1902 		(void) BZ2_bzCompressReset(&hp->bzstream);
1903 
1904 		/* Give any unused output buffer back to the main task */
1905 		if (hp->cpout != NULL) {
1906 			hp->cpout->used = 0;
1907 			CQ_PUT(mainq, hp->cpout, CBUF_ERRMSG);
1908 			hp->cpout = NULL;
1909 		}
1910 	}
1911 }
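
/*
 * dumpsys_bzrun() is the in-kernel variant of the usual bzlib
 * streaming loop.  For comparison, the same protocol in ordinary
 * userland code looks roughly like this (a sketch; in/insize,
 * out/outsize, and write_block() are illustrative):
 *
 *	bz_stream bs;
 *	int rc;
 *
 *	bzero(&bs, sizeof (bs));
 *	(void) BZ2_bzCompressInit(&bs, 9, 0, 0);
 *	bs.next_in = in;
 *	bs.avail_in = insize;
 *	do {
 *		bs.next_out = out;
 *		bs.avail_out = outsize;
 *		rc = BZ2_bzCompress(&bs, bs.avail_in ? BZ_RUN : BZ_FINISH);
 *		write_block(out, outsize - bs.avail_out);
 *	} while (rc != BZ_STREAM_END);
 *	(void) BZ2_bzCompressEnd(&bs);
 *
 * The function above differs mainly in that output buffers come from
 * freebufq, each flushed block is prefixed with a tag+size word, and
 * the stream state is recycled with BZ2_bzCompressReset() rather than
 * torn down.
 */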
1912 
1913 static void
1914 dumpsys_bz2compress(helper_t *hp)
1915 {
1916 	dumpsync_t *ds = hp->ds;
1917 	dumpstreamhdr_t sh;
1918 
1919 	(void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
1920 	sh.stream_pagenum = (pgcnt_t)-1;
1921 	sh.stream_npages = 0;
1922 	hp->cpin = NULL;
1923 	hp->cpout = NULL;
1924 	hp->cperr = NULL;
1925 	hp->in = 0;
1926 	hp->out = 0;
1927 	hp->bzstream.avail_in = 0;
1928 
1929 	/* Bump reference to mainq while we are running */
1930 	CQ_OPEN(mainq);
1931 
1932 	/* Get one page at a time */
1933 	while (dumpsys_sread(hp)) {
1934 		if (sh.stream_pagenum != hp->cpin->pagenum) {
1935 			sh.stream_pagenum = hp->cpin->pagenum;
1936 			sh.stream_npages = btop(hp->cpin->used);
1937 			dumpsys_bzrun(hp, &sh, sizeof (sh), BZ_RUN);
1938 		}
1939 		dumpsys_bzrun(hp, hp->page, PAGESIZE, BZ_RUN);
1940 	}
1941 
1942 	/* Done with input, flush any partial buffer */
1943 	if (sh.stream_pagenum != (pgcnt_t)-1) {
1944 		dumpsys_bzrun(hp, NULL, 0, BZ_FINISH);
1945 		dumpsys_errmsg(hp, NULL);
1946 	}
1947 
1948 	ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
1949 
1950 	/* Decrement main queue count, we are done */
1951 	CQ_CLOSE(mainq);
1952 }
1953 
1954 /*
1955  * Compress with lzjb.
1956  * Write the stream block when it is full or when size == 0.
1957  * If csize == 0, buf holds a stream header; else write <csize, data>.
1958  * size == 0 is a call to flush the current buffer.
1959  * hp->cpout is the buffer we are flushing or filling.
1960  * hp->out is the next index at which to store data.
1961  * osize is either csize plus the data size, or the size of a stream header.
1962  */
1963 static void
1964 dumpsys_lzjbrun(helper_t *hp, size_t csize, void *buf, size_t size)
1965 {
1966 	dumpsync_t *ds = hp->ds;
1967 	const int CSIZE = sizeof (dumpcsize_t);
1968 	dumpcsize_t cs;
1969 	size_t osize = csize > 0 ? CSIZE + size : size;
1970 
1971 	/* If flushing and there is no buffer, just return */
1972 	if (size == 0 && hp->cpout == NULL)
1973 		return;
1974 
1975 	/* If flushing, or if cpout is full, write it out */
1976 	if (size == 0 ||
1977 	    (hp->cpout != NULL && hp->out + osize > hp->cpout->size)) {
1978 
1979 		/* Set tag+size word at the front of the stream block. */
1980 		cs = DUMP_SET_TAG(hp->out - CSIZE, hp->tag);
1981 		(void) memcpy(hp->cpout->buf, &cs, CSIZE);
1982 
1983 		/* Write block to dump file. */
1984 		dumpsys_swrite(hp, hp->cpout, hp->out);
1985 
1986 		/* Clear pointer to indicate we need a new buffer */
1987 		hp->cpout = NULL;
1988 
1989 		/* flushing, we are done */
1990 		if (size == 0)
1991 			return;
1992 	}
1993 
1994 	/* Get an output buffer if we don't have one. */
1995 	if (hp->cpout == NULL) {
1996 		HRSTART(hp->perpage, outwait);
1997 		hp->cpout = CQ_GET(freebufq);
1998 		HRSTOP(hp->perpage, outwait);
1999 		hp->out = CSIZE;
2000 	}
2001 
2002 	/* Store csize word. This is the size of compressed data. */
2003 	if (csize > 0) {
2004 		cs = DUMP_SET_TAG(csize, 0);
2005 		(void) memcpy(hp->cpout->buf + hp->out, &cs, CSIZE);
2006 		hp->out += CSIZE;
2007 	}
2008 
2009 	/* Store the data. */
2010 	(void) memcpy(hp->cpout->buf + hp->out, buf, size);
2011 	hp->out += size;
2012 }
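
/*
 * The stream block assembled above has the following layout (a
 * sketch; the leading tag lets the dump reader demultiplex the
 * interleaved helper streams):
 *
 *	+--------------------+  offset 0
 *	| tag | block size   |  DUMP_SET_TAG(hp->out - CSIZE, hp->tag)
 *	+--------------------+  offset CSIZE
 *	| stream header      |  record written with csize == 0
 *	+--------------------+
 *	| csize | lzjb data  |  one record per compressed page
 *	+--------------------+
 *	| csize | lzjb data  |
 *	+--------------------+
 */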
2013 
2014 static void
2015 dumpsys_lzjbcompress(helper_t *hp)
2016 {
2017 	dumpsync_t *ds = hp->ds;
2018 	size_t csize;
2019 	dumpstreamhdr_t sh;
2020 
2021 	(void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2022 	sh.stream_pagenum = (pgcnt_t)-1;
2023 	sh.stream_npages = 0;
2024 	hp->cpin = NULL;
2025 	hp->cpout = NULL;
2026 	hp->cperr = NULL;
2027 	hp->in = 0;
2028 	hp->out = 0;
2029 
2030 	/* Bump reference to mainq while we are running */
2031 	CQ_OPEN(mainq);
2032 
2033 	/* Get one page at a time */
2034 	while (dumpsys_sread(hp)) {
2035 
2036 		/* Create a stream header for each new input map */
2037 		if (sh.stream_pagenum != hp->cpin->pagenum) {
2038 			sh.stream_pagenum = hp->cpin->pagenum;
2039 			sh.stream_npages = btop(hp->cpin->used);
2040 			dumpsys_lzjbrun(hp, 0, &sh, sizeof (sh));
2041 		}
2042 
2043 		/* Compress one page */
2044 		HRSTART(hp->perpage, compress);
2045 		csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2046 		HRSTOP(hp->perpage, compress);
2047 
2048 		/* Add csize+data to output block */
2049 		ASSERT(csize > 0 && csize <= PAGESIZE);
2050 		dumpsys_lzjbrun(hp, csize, hp->lzbuf, csize);
2051 	}
2052 
2053 	/* Done with input, flush any partial buffer */
2054 	if (sh.stream_pagenum != (pgcnt_t)-1) {
2055 		dumpsys_lzjbrun(hp, 0, NULL, 0);
2056 		dumpsys_errmsg(hp, NULL);
2057 	}
2058 
2059 	ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2060 
2061 	/* Decrement main queue count, we are done */
2062 	CQ_CLOSE(mainq);
2063 }
2064 
2065 /*
2066  * Dump helper called from panic_idle() to compress pages.  CPUs in
2067  * this path must not call most kernel services.
2068  *
2069  * During panic, all but one of the CPUs are idle. These CPUs are
2070  * used as helpers, working in parallel to copy and compress memory
2071  * pages. These processors cannot call any kernel services, however,
2072  * because mutexes become no-ops during panic and cross-call
2073  * interrupts are inhibited.  Therefore, during a panic dump the
2074  * helper CPUs communicate with the panic CPU using memory
2075  * variables. All memory mapping and I/O is performed by the panic
2076  * CPU.
2077  */
2078 void
2079 dumpsys_helper()
2080 {
2081 	dumpsys_spinlock(&dumpcfg.helper_lock);
2082 	if (dumpcfg.helpers_wanted) {
2083 		helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2084 
2085 		for (hp = dumpcfg.helper; hp != hpend; hp++) {
2086 			if (hp->helper == FREEHELPER) {
2087 				hp->helper = CPU->cpu_id;
2088 				BT_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2089 
2090 				dumpsys_spinunlock(&dumpcfg.helper_lock);
2091 
2092 				if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2093 					dumpsys_lzjbcompress(hp);
2094 				else
2095 					dumpsys_bz2compress(hp);
2096 
2097 				hp->helper = DONEHELPER;
2098 				return;
2099 			}
2100 		}
2101 	}
2102 	dumpsys_spinunlock(&dumpcfg.helper_lock);
2103 }
2104 
2105 /*
2106  * Dump helper for live dumps.
2107  * These run as system tasks.
2108  */
2109 static void
2110 dumpsys_live_helper(void *arg)
2111 {
2112 	helper_t *hp = arg;
2113 
2114 	BT_ATOMIC_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2115 	if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2116 		dumpsys_lzjbcompress(hp);
2117 	else
2118 		dumpsys_bz2compress(hp);
2119 }
2120 
2121 /*
2122  * Compress one page with lzjb (single-threaded case)
2123  */
2124 static void
2125 dumpsys_lzjb_page(helper_t *hp, cbuf_t *cp)
2126 {
2127 	dumpsync_t *ds = hp->ds;
2128 	uint32_t csize;
2129 
2130 	hp->helper = MAINHELPER;
2131 	hp->in = 0;
2132 	hp->used = 0;
2133 	hp->cpin = cp;
2134 	while (hp->used < cp->used) {
2135 		HRSTART(hp->perpage, copy);
2136 		hp->in = dumpsys_copy_page(hp, hp->in);
2137 		hp->used += PAGESIZE;
2138 		HRSTOP(hp->perpage, copy);
2139 
2140 		HRSTART(hp->perpage, compress);
2141 		csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2142 		HRSTOP(hp->perpage, compress);
2143 
2144 		HRSTART(hp->perpage, write);
2145 		dumpvp_write(&csize, sizeof (csize));
2146 		dumpvp_write(hp->lzbuf, csize);
2147 		HRSTOP(hp->perpage, write);
2148 	}
2149 	CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
2150 	hp->cpin = NULL;
2151 }
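
/*
 * Note that this single-threaded path writes each page directly as a
 * <csize, data> pair with no stream block framing: with only one
 * compressor there are no interleaved streams to demultiplex.
 */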
2152 
2153 /*
2154  * Main task to dump pages. This is called on the dump CPU.
2155  */
2156 static void
2157 dumpsys_main_task(void *arg)
2158 {
2159 	dumpsync_t *ds = arg;
2160 	pgcnt_t pagenum = 0, bitnum = 0, hibitnum;
2161 	dumpmlw_t mlw;
2162 	cbuf_t *cp;
2163 	pgcnt_t baseoff, pfnoff;
2164 	pfn_t base, pfn;
2165 	int sec;
2166 
2167 	dump_init_memlist_walker(&mlw);
2168 
2169 	/* CONSTCOND */
2170 	while (1) {
2171 
2172 		if (ds->percent > ds->percent_done) {
2173 			ds->percent_done = ds->percent;
2174 			sec = (gethrtime() - ds->start) / 1000 / 1000 / 1000;
2175 			uprintf("^\r%2d:%02d %3d%% done",
2176 			    sec / 60, sec % 60, ds->percent);
2177 			ds->neednl = 1;
2178 		}
2179 
2180 		while (CQ_IS_EMPTY(mainq) && !CQ_IS_EMPTY(writerq)) {
2181 
2182 			/* the writerq never blocks */
2183 			cp = CQ_GET(writerq);
2184 			if (cp == NULL)
2185 				break;
2186 
2187 			dump_timeleft = dump_timeout;
2188 
2189 			HRSTART(ds->perpage, write);
2190 			dumpvp_write(cp->buf, cp->used);
2191 			HRSTOP(ds->perpage, write);
2192 
2193 			CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2194 		}
2195 
2196 		/*
2197 		 * Wait here for some buffers to process. Returns NULL
2198 		 * when all helpers have terminated and all buffers
2199 		 * have been processed.
2200 		 */
2201 		cp = CQ_GET(mainq);
2202 
2203 		if (cp == NULL) {
2204 
2205 			/* Drain the write queue. */
2206 			if (!CQ_IS_EMPTY(writerq))
2207 				continue;
2208 
2209 			/* Main task exits here. */
2210 			break;
2211 		}
2212 
2213 		dump_timeleft = dump_timeout;
2214 
2215 		switch (cp->state) {
2216 
2217 		case CBUF_FREEMAP:
2218 
2219 			/*
2220 			 * Note that we drop CBUF_FREEMAP buffers on
2221 			 * the floor (they will not be on any cqueue)
2222 			 * when we no longer need them.
2223 			 */
2224 			if (bitnum >= dumpcfg.bitmapsize)
2225 				break;
2226 
2227 			if (dump_ioerr) {
2228 				bitnum = dumpcfg.bitmapsize;
2229 				CQ_CLOSE(helperq);
2230 				break;
2231 			}
2232 
2233 			HRSTART(ds->perpage, bitmap);
2234 			for (; bitnum < dumpcfg.bitmapsize; bitnum++)
2235 				if (BT_TEST(dumpcfg.bitmap, bitnum))
2236 					break;
2237 			HRSTOP(ds->perpage, bitmap);
2238 			dump_timeleft = dump_timeout;
2239 
2240 			if (bitnum >= dumpcfg.bitmapsize) {
2241 				CQ_CLOSE(helperq);
2242 				break;
2243 			}
2244 
2245 			/*
2246 			 * Try to map CBUF_MAPSIZE ranges. We can't
2247 			 * assume that the memory segment size is a
2248 			 * multiple of CBUF_MAPSIZE, nor that the
2249 			 * segment starts on a CBUF_MAPSIZE
2250 			 * boundary.
2251 			 */
2252 			pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2253 			ASSERT(pfn != PFN_INVALID);
2254 			ASSERT(bitnum + mlw.mpleft <= dumpcfg.bitmapsize);
2255 
2256 			base = P2ALIGN(pfn, CBUF_MAPNP);
2257 			if (base < mlw.mpaddr) {
2258 				base = mlw.mpaddr;
2259 				baseoff = P2PHASE(base, CBUF_MAPNP);
2260 			} else {
2261 				baseoff = 0;
2262 			}
2263 
2264 			pfnoff = pfn - base;
2265 			if (pfnoff + mlw.mpleft < CBUF_MAPNP) {
2266 				hibitnum = bitnum + mlw.mpleft;
2267 				cp->size = ptob(pfnoff + mlw.mpleft);
2268 			} else {
2269 				hibitnum = bitnum - pfnoff + CBUF_MAPNP -
2270 				    baseoff;
2271 				cp->size = CBUF_MAPSIZE - ptob(baseoff);
2272 			}
2273 
2274 			cp->pfn = pfn;
2275 			cp->bitnum = bitnum++;
2276 			cp->pagenum = pagenum++;
2277 			cp->off = ptob(pfnoff);
2278 
2279 			for (; bitnum < hibitnum; bitnum++)
2280 				if (BT_TEST(dumpcfg.bitmap, bitnum))
2281 					pagenum++;
2282 
2283 			dump_timeleft = dump_timeout;
2284 			cp->used = ptob(pagenum - cp->pagenum);
2285 
2286 			HRSTART(ds->perpage, map);
2287 			hat_devload(kas.a_hat, cp->buf, cp->size, base,
2288 			    PROT_READ, HAT_LOAD_NOCONSIST);
2289 			HRSTOP(ds->perpage, map);
2290 
2291 			ds->pages_mapped += btop(cp->size);
2292 			ds->pages_used += pagenum - cp->pagenum;
2293 
2294 			CQ_OPEN(mainq);
2295 
2296 			/*
2297 			 * If there are no helpers, the main task does
2298 			 * non-streamed lzjb compression.
2299 			 */
2300 			if (dumpcfg.clevel == 0) {
2301 				dumpsys_lzjb_page(dumpcfg.helper, cp);
2302 				break;
2303 			}
2304 
2305 			/* pass mapped pages to a helper */
2306 			CQ_PUT(helperq, cp, CBUF_INREADY);
2307 
2308 		/* the last range was queued, so close the helper queue */
2309 			if (bitnum >= dumpcfg.bitmapsize)
2310 				CQ_CLOSE(helperq);
2311 
2312 			break;
2313 
2314 		case CBUF_USEDMAP:
2315 
2316 			ds->npages += btop(cp->used);
2317 
2318 			HRSTART(ds->perpage, unmap);
2319 			hat_unload(kas.a_hat, cp->buf, cp->size, HAT_UNLOAD);
2320 			HRSTOP(ds->perpage, unmap);
2321 
2322 			if (bitnum < dumpcfg.bitmapsize)
2323 				CQ_PUT(mainq, cp, CBUF_FREEMAP);
2324 			CQ_CLOSE(mainq);
2325 
2326 			ASSERT(ds->npages <= dumphdr->dump_npages);
2327 			ds->percent = ds->npages * 100LL / dumphdr->dump_npages;
2328 			break;
2329 
2330 		case CBUF_WRITE:
2331 
2332 			CQ_PUT(writerq, cp, CBUF_WRITE);
2333 			break;
2334 
2335 		case CBUF_ERRMSG:
2336 
2337 			if (cp->used > 0) {
2338 				cp->buf[cp->size - 2] = '\n';
2339 				cp->buf[cp->size - 1] = '\0';
2340 				if (ds->neednl) {
2341 					uprintf("\n%s", cp->buf);
2342 					ds->neednl = 0;
2343 				} else {
2344 					uprintf("%s", cp->buf);
2345 				}
2346 			}
2347 			CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2348 			break;
2349 
2350 		default:
2351 			uprintf("dump: unexpected buffer state %d, "
2352 			    "buffer will be lost\n", cp->state);
2353 			break;
2354 
2355 		} /* end switch */
2356 
2357 	} /* end while(1) */
2358 }
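
/*
 * Buffer life cycle driven by the main task above (sketch):
 *
 *   map buffers:	CBUF_FREEMAP -> pages mapped -> CBUF_INREADY on
 *			helperq -> helper copies pages -> CBUF_USEDMAP
 *			on mainq -> unmapped -> CBUF_FREEMAP again
 *   data buffers:	CBUF_FREEBUF on freebufq -> helper fills and
 *			writes -> CBUF_WRITE on mainq -> writerq ->
 *			dumpvp_write() -> CBUF_FREEBUF again
 *   messages:		CBUF_ERRMSG on mainq -> uprintf() ->
 *			CBUF_FREEBUF
 */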
2359 
2360 #ifdef	COLLECT_METRICS
2361 size_t
2362 dumpsys_metrics(dumpsync_t *ds, char *buf, size_t size)
2363 {
2364 	dumpcfg_t *cfg = &dumpcfg;
2365 	int myid = CPU->cpu_seqid;
2366 	int i, compress_ratio;
2367 	int sec, iorate;
2368 	helper_t *hp, *hpend = &cfg->helper[cfg->nhelper];
2369 	char *e = buf + size;
2370 	char *p = buf;
2371 
2372 	sec = ds->elapsed / (1000 * 1000 * 1000ULL);
2373 	if (sec < 1)
2374 		sec = 1;
2375 
2376 	if (ds->iotime < 1)
2377 		ds->iotime = 1;
2378 	iorate = (ds->nwrite * 100000ULL) / ds->iotime;
2379 
2380 	compress_ratio = 100LL * ds->npages / btopr(ds->nwrite + 1);
2381 
2382 #define	P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)
2383 
2384 	P("Master cpu_seqid,%d\n", CPU->cpu_seqid);
2385 	P("Master cpu_id,%d\n", CPU->cpu_id);
2386 	P("dump_flags,0x%x\n", dumphdr->dump_flags);
2387 	P("dump_ioerr,%d\n", dump_ioerr);
2388 
2389 	P("Helpers:\n");
2390 	for (i = 0; i < ncpus; i++) {
2391 		if ((i & 15) == 0)
2392 			P(",,%03d,", i);
2393 		if (i == myid)
2394 			P("   M");
2395 		else if (BT_TEST(cfg->helpermap, i))
2396 			P("%4d", cpu_seq[i]->cpu_id);
2397 		else
2398 			P("   *");
2399 		if ((i & 15) == 15)
2400 			P("\n");
2401 	}
2402 
2403 	P("ncbuf_used,%d\n", cfg->ncbuf_used);
2404 	P("ncmap,%d\n", cfg->ncmap);
2405 
2406 	P("Found %ldM ranges,%ld\n", (CBUF_MAPSIZE / DUMP_1MB), cfg->found4m);
2407 	P("Found small pages,%ld\n", cfg->foundsm);
2408 
2409 	P("Compression level,%d\n", cfg->clevel);
2410 	P("Compression type,%s %s\n", cfg->clevel == 0 ? "serial" : "parallel",
2411 	    cfg->clevel >= DUMP_CLEVEL_BZIP2 ? "bzip2" : "lzjb");
2412 	P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio %
2413 	    100);
2414 	P("nhelper_used,%d\n", cfg->nhelper_used);
2415 
2416 	P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100);
2417 	P("..total bytes,%lld\n", (u_longlong_t)ds->nwrite);
2418 	P("..total nsec,%lld\n", (u_longlong_t)ds->iotime);
2419 	P("dumpbuf.iosize,%ld\n", dumpbuf.iosize);
2420 	P("dumpbuf.size,%ld\n", dumpbuf.size);
2421 
2422 	P("Dump pages/sec,%llu\n", (u_longlong_t)ds->npages / sec);
2423 	P("Dump pages,%llu\n", (u_longlong_t)ds->npages);
2424 	P("Dump time,%d\n", sec);
2425 
2426 	if (ds->pages_mapped > 0)
2427 		P("per-cent map utilization,%d\n", (int)((100 * ds->pages_used)
2428 		    / ds->pages_mapped));
2429 
2430 	P("\nPer-page metrics:\n");
2431 	if (ds->npages > 0) {
2432 		for (hp = cfg->helper; hp != hpend; hp++) {
2433 #define	PERPAGE(x)	ds->perpage.x += hp->perpage.x;
2434 			PERPAGES;
2435 #undef PERPAGE
2436 		}
2437 #define	PERPAGE(x) \
2438 		P("%s nsec/page,%d\n", #x, (int)(ds->perpage.x / ds->npages));
2439 		PERPAGES;
2440 #undef PERPAGE
2441 		P("freebufq.empty,%d\n", (int)(ds->freebufq.empty /
2442 		    ds->npages));
2443 		P("helperq.empty,%d\n", (int)(ds->helperq.empty /
2444 		    ds->npages));
2445 		P("writerq.empty,%d\n", (int)(ds->writerq.empty /
2446 		    ds->npages));
2447 		P("mainq.empty,%d\n", (int)(ds->mainq.empty / ds->npages));
2448 
2449 		P("I/O wait nsec/page,%llu\n", (u_longlong_t)(ds->iowait /
2450 		    ds->npages));
2451 	}
2452 #undef P
2453 	if (p < e)
2454 		bzero(p, e - p);
2455 	return (p - buf);
2456 }
2457 #endif	/* COLLECT_METRICS */
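
/*
 * The P() macro above implements the usual bounded-append idiom for
 * building a report in a fixed-size buffer.  A stand-alone sketch of
 * the same pattern (buf, npages, and sec are illustrative):
 *
 *	char buf[512], *p = buf, *e = buf + sizeof (buf);
 *
 *	p += (p < e) ? snprintf(p, e - p, "pages,%llu\n", npages) : 0;
 *	p += (p < e) ? snprintf(p, e - p, "time,%d\n", sec) : 0;
 *
 * Because snprintf() returns the length the output would have had, p
 * can step past e once the buffer fills; the guard then turns every
 * later append into a no-op.
 */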
2458 
2459 /*
2460  * Dump the system.
2461  */
2462 void
2463 dumpsys(void)
2464 {
2465 	dumpsync_t *ds = &dumpsync;
2466 	taskq_t *livetaskq = NULL;
2467 	pfn_t pfn;
2468 	pgcnt_t bitnum;
2469 	proc_t *p;
2470 	helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2471 	cbuf_t *cp;
2472 	pid_t npids, pidx;
2473 	char *content;
2474 	int save_dump_clevel;
2475 	dumpmlw_t mlw;
2476 	dumpcsize_t datatag;
2477 	dumpdatahdr_t datahdr;
2478 
2479 	if (dumpvp == NULL || dumphdr == NULL) {
2480 		uprintf("skipping system dump - no dump device configured\n");
2481 		if (panicstr) {
2482 			dumpcfg.helpers_wanted = 0;
2483 			dumpsys_spinunlock(&dumpcfg.helper_lock);
2484 		}
2485 		return;
2486 	}
2487 	dumpbuf.cur = dumpbuf.start;
2488 
2489 	/* clear the sync variables */
2490 	ASSERT(dumpcfg.nhelper > 0);
2491 	bzero(ds, sizeof (*ds));
2492 	ds->dumpcpu = CPU->cpu_id;
2493 
2494 	/*
2495 	 * Calculate the starting block for dump.  If we're dumping on a
2496 	 * swap device, start 1/5 of the way in; otherwise, start at the
2497 	 * beginning.  And never use the first page -- it may be a disk label.
2498 	 */
2499 	if (dumpvp->v_flag & VISSWAP)
2500 		dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET);
2501 	else
2502 		dumphdr->dump_start = DUMP_OFFSET;
2503 
2504 	dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED;
2505 	dumphdr->dump_crashtime = gethrestime_sec();
2506 	dumphdr->dump_npages = 0;
2507 	dumphdr->dump_nvtop = 0;
2508 	bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize));
2509 	dump_timeleft = dump_timeout;
2510 
2511 	if (panicstr) {
2512 		dumphdr->dump_flags &= ~DF_LIVE;
2513 		(void) VOP_DUMPCTL(dumpvp, DUMP_FREE, NULL, NULL);
2514 		(void) VOP_DUMPCTL(dumpvp, DUMP_ALLOC, NULL, NULL);
2515 		(void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE,
2516 		    panicstr, panicargs);
2518 	}
2519 
2520 	if (dump_conflags & DUMP_ALL)
2521 		content = "all";
2522 	else if (dump_conflags & DUMP_CURPROC)
2523 		content = "kernel + curproc";
2524 	else
2525 		content = "kernel";
2526 	uprintf("dumping to %s, offset %lld, content: %s\n", dumppath,
2527 	    dumphdr->dump_start, content);
2528 
2529 	/* Make sure nodename is current */
2530 	bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN);
2531 
2532 	/*
2533 	 * If this is a live dump, try to open a VCHR vnode for better
2534 	 * performance. We must take care to flush the buffer cache
2535 	 * first.
2536 	 */
2537 	if (!panicstr) {
2538 		vnode_t *cdev_vp, *cmn_cdev_vp;
2539 
2540 		ASSERT(dumpbuf.cdev_vp == NULL);
2541 		cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR);
2542 		if (cdev_vp != NULL) {
2543 			cmn_cdev_vp = common_specvp(cdev_vp);
2544 			if (VOP_OPEN(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL)
2545 			    == 0) {
2546 				if (vn_has_cached_data(dumpvp))
2547 					(void) pvn_vplist_dirty(dumpvp, 0, NULL,
2548 					    B_INVAL | B_TRUNC, kcred);
2549 				dumpbuf.cdev_vp = cmn_cdev_vp;
2550 			} else {
2551 				VN_RELE(cdev_vp);
2552 			}
2553 		}
2554 	}
2555 
2556 	/*
2557 	 * Store a hires timestamp so we can look it up during debugging.
2558 	 */
2559 	lbolt_debug_entry();
2560 
2561 	/*
2562 	 * Leave room for the message and ereport save areas and terminal dump
2563 	 * header.
2564 	 */
2565 	dumpbuf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET -
2566 	    DUMP_ERPTSIZE;
2567 
2568 	/*
2569 	 * Write out the symbol table.  It's no longer compressed,
2570 	 * so its 'size' and 'csize' are equal.
2571 	 */
2572 	dumpbuf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE;
2573 	dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize =
2574 	    ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX);
2575 
2576 	/*
2577 	 * Write out the translation map.
2578 	 */
2579 	dumphdr->dump_map = dumpvp_flush();
2580 	dump_as(&kas);
2581 	dumphdr->dump_nvtop += dump_plat_addr();
2582 
2583 	/*
2584 	 * Call into the hat layer, which may have unmapped pages that
2585 	 * also need to be in the dump.
2586 	 */
2587 	hat_dump();
2588 
2589 	if (dump_conflags & DUMP_ALL) {
2590 		mutex_enter(&pidlock);
2591 
2592 		for (npids = 0, p = practive; p != NULL; p = p->p_next)
2593 			dumpcfg.pids[npids++] = p->p_pid;
2594 
2595 		mutex_exit(&pidlock);
2596 
2597 		for (pidx = 0; pidx < npids; pidx++)
2598 			(void) dump_process(dumpcfg.pids[pidx]);
2599 
2600 		for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2601 			dump_timeleft = dump_timeout;
2602 			BT_SET(dumpcfg.bitmap, bitnum);
2603 		}
2604 		dumphdr->dump_npages = dumpcfg.bitmapsize;
2605 		dumphdr->dump_flags |= DF_ALL;
2606 
2607 	} else if (dump_conflags & DUMP_CURPROC) {
2608 		/*
2609 		 * Determine which pid is to be dumped.  If we're panicking, we
2610 		 * dump the process associated with panic_thread (if any).  If
2611 		 * this is a live dump, we dump the process associated with
2612 		 * curthread.
2613 		 */
2614 		npids = 0;
2615 		if (panicstr) {
2616 			if (panic_thread != NULL &&
2617 			    panic_thread->t_procp != NULL &&
2618 			    panic_thread->t_procp != &p0) {
2619 				dumpcfg.pids[npids++] =
2620 				    panic_thread->t_procp->p_pid;
2621 			}
2622 		} else {
2623 			dumpcfg.pids[npids++] = curthread->t_procp->p_pid;
2624 		}
2625 
2626 		if (npids && dump_process(dumpcfg.pids[0]) == 0)
2627 			dumphdr->dump_flags |= DF_CURPROC;
2628 		else
2629 			dumphdr->dump_flags |= DF_KERNEL;
2630 
2631 	} else {
2632 		dumphdr->dump_flags |= DF_KERNEL;
2633 	}
2634 
2635 	dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1;
2636 
2637 	/*
2638 	 * Write out the pfn table.
2639 	 */
2640 	dumphdr->dump_pfn = dumpvp_flush();
2641 	dump_init_memlist_walker(&mlw);
2642 	for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2643 		dump_timeleft = dump_timeout;
2644 		if (!BT_TEST(dumpcfg.bitmap, bitnum))
2645 			continue;
2646 		pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2647 		ASSERT(pfn != PFN_INVALID);
2648 		dumpvp_write(&pfn, sizeof (pfn_t));
2649 	}
2650 	dump_plat_pfn();
2651 
2652 	/*
2653 	 * Write out all the pages.
2654 	 * Map pages, copy them handling UEs, compress, and write them out.
2655 	 * Cooperate with any helpers running on CPUs in panic_idle().
2656 	 */
2657 	dumphdr->dump_data = dumpvp_flush();
2658 
2659 	bzero(dumpcfg.helpermap, BT_SIZEOFMAP(NCPU));
2660 	ds->live = dumpcfg.clevel > 0 &&
2661 	    (dumphdr->dump_flags & DF_LIVE) != 0;
2662 
2663 	save_dump_clevel = dumpcfg.clevel;
2664 	if (panicstr)
2665 		dumpsys_get_maxmem();
2666 	else if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2667 		dumpcfg.clevel = DUMP_CLEVEL_LZJB;
2668 
2669 	dumpcfg.nhelper_used = 0;
2670 	for (hp = dumpcfg.helper; hp != hpend; hp++) {
2671 		if (hp->page == NULL) {
2672 			hp->helper = DONEHELPER;
2673 			continue;
2674 		}
2675 		++dumpcfg.nhelper_used;
2676 		hp->helper = FREEHELPER;
2677 		hp->taskqid = NULL;
2678 		hp->ds = ds;
2679 		bzero(&hp->perpage, sizeof (hp->perpage));
2680 		if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2681 			(void) BZ2_bzCompressReset(&hp->bzstream);
2682 	}
2683 
2684 	CQ_OPEN(freebufq);
2685 	CQ_OPEN(helperq);
2686 
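	/* Seed the free queue with the allocated output buffers. */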
2687 	dumpcfg.ncbuf_used = 0;
2688 	for (cp = dumpcfg.cbuf; cp != &dumpcfg.cbuf[dumpcfg.ncbuf]; cp++) {
2689 		if (cp->buf != NULL) {
2690 			CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2691 			++dumpcfg.ncbuf_used;
2692 		}
2693 	}
2694 
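	/* Seed the main queue with the free map buffers. */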
2695 	for (cp = dumpcfg.cmap; cp != &dumpcfg.cmap[dumpcfg.ncmap]; cp++)
2696 		CQ_PUT(mainq, cp, CBUF_FREEMAP);
2697 
2698 	ds->start = gethrtime();
2699 	ds->iowaitts = ds->start;
2700 
2701 	/* start helpers */
2702 	if (ds->live) {
2703 		int n = dumpcfg.nhelper_used;
2704 		int pri = MINCLSYSPRI - 25;
2705 
2706 		livetaskq = taskq_create("LiveDump", n, pri, n, n,
2707 		    TASKQ_PREPOPULATE);
2708 		for (hp = dumpcfg.helper; hp != hpend; hp++) {
2709 			if (hp->page == NULL)
2710 				continue;
2711 			hp->helper = hp - dumpcfg.helper;
2712 			hp->taskqid = taskq_dispatch(livetaskq,
2713 			    dumpsys_live_helper, (void *)hp, TQ_NOSLEEP);
2714 		}
2715 
2716 	} else {
2717 		dumpcfg.helpers_wanted = dumpcfg.clevel > 0;
2718 		dumpsys_spinunlock(&dumpcfg.helper_lock);
2719 	}
2720 
2721 	/* run main task */
2722 	dumpsys_main_task(ds);
2723 
2724 	ds->elapsed = gethrtime() - ds->start;
2725 	if (ds->elapsed < 1)
2726 		ds->elapsed = 1;
2727 
2728 	if (livetaskq != NULL)
2729 		taskq_destroy(livetaskq);
2730 
2731 	if (ds->neednl) {
2732 		uprintf("\n");
2733 		ds->neednl = 0;
2734 	}
2735 
2736 	/* record actual pages dumped */
2737 	dumphdr->dump_npages = ds->npages;
2738 
2739 	/* platform-specific data */
2740 	dumphdr->dump_npages += dump_plat_data(dumpcfg.cbuf[0].buf);
2741 
2742 	/* note any errors by clearing DF_COMPLETE */
2743 	if (dump_ioerr || ds->npages < dumphdr->dump_npages)
2744 		dumphdr->dump_flags &= ~DF_COMPLETE;
2745 
2746 	/* end of stream blocks */
2747 	datatag = 0;
2748 	dumpvp_write(&datatag, sizeof (datatag));
2749 
2750 	/* compression info in data header */
2751 	bzero(&datahdr, sizeof (datahdr));
2752 	datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC;
2753 	datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION;
2754 	datahdr.dump_maxcsize = CBUF_SIZE;
2755 	datahdr.dump_maxrange = CBUF_MAPSIZE / PAGESIZE;
2756 	datahdr.dump_nstreams = dumpcfg.nhelper_used;
2757 	datahdr.dump_clevel = dumpcfg.clevel;
2758 #ifdef COLLECT_METRICS
2759 	if (dump_metrics_on)
2760 		datahdr.dump_metrics = dumpsys_metrics(ds, dumpcfg.cbuf[0].buf,
2761 		    MIN(dumpcfg.cbuf[0].size, DUMP_OFFSET - sizeof (dumphdr_t) -
2762 		    sizeof (dumpdatahdr_t)));
2763 #endif
2764 	datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data;
2765 
2766 	/*
2767 	 * Write out the initial and terminal dump headers.
2768 	 */
2769 	dumpbuf.vp_off = dumphdr->dump_start;
2770 	dumpvp_write(dumphdr, sizeof (dumphdr_t));
2771 	(void) dumpvp_flush();
2772 
2773 	dumpbuf.vp_limit = dumpvp_size;
2774 	dumpbuf.vp_off = dumpbuf.vp_limit - DUMP_OFFSET;
2775 	dumpvp_write(dumphdr, sizeof (dumphdr_t));
2776 	dumpvp_write(&datahdr, sizeof (dumpdatahdr_t));
2777 	dumpvp_write(dumpcfg.cbuf[0].buf, datahdr.dump_metrics);
2778 
2779 	(void) dumpvp_flush();
2780 
2781 	uprintf("\r%3d%% done: %llu pages dumped, ",
2782 	    ds->percent_done, (u_longlong_t)ds->npages);
2783 
2784 	if (dump_ioerr == 0) {
2785 		uprintf("dump succeeded\n");
2786 	} else {
2787 		uprintf("dump failed: error %d\n", dump_ioerr);
2788 #ifdef DEBUG
2789 		if (panicstr)
2790 			debug_enter("dump failed");
2791 #endif
2792 	}
2793 
2794 	/*
2795 	 * Write out all undelivered messages.  This has to be the *last*
2796 	 * thing we do because the dump process itself emits messages.
2797 	 */
2798 	if (panicstr) {
2799 		dump_ereports();
2800 		dump_messages();
2801 	}
2802 
2803 	delay(2 * hz);	/* let people see the 'done' message */
2804 	dump_timeleft = 0;
2805 	dump_ioerr = 0;
2806 
2807 	/* restore settings after live dump completes */
2808 	if (!panicstr) {
2809 		dumpcfg.clevel = save_dump_clevel;
2810 
2811 		/* release any VCHR open of the dump device */
2812 		if (dumpbuf.cdev_vp != NULL) {
2813 			(void) VOP_CLOSE(dumpbuf.cdev_vp, FREAD | FWRITE, 1, 0,
2814 			    kcred, NULL);
2815 			VN_RELE(dumpbuf.cdev_vp);
2816 			dumpbuf.cdev_vp = NULL;
2817 		}
2818 	}
2819 }
2820 
2821 /*
2822  * This function is called whenever the memory size, as represented
2823  * by the phys_install list, changes.
2824  */
2825 void
2826 dump_resize()
2827 {
2828 	mutex_enter(&dump_lock);
2829 	dumphdr_init();
2830 	dumpbuf_resize();
2831 	dump_update_clevel();
2832 	mutex_exit(&dump_lock);
2833 }
2834 
2835 /*
2836  * This function allows for dynamic resizing of a dump area. It assumes that
2837  * the underlying device has already updated its size(9P) property.
2838  */
2839 int
2840 dumpvp_resize()
2841 {
2842 	int error;
2843 	vattr_t vattr;
2844 
2845 	mutex_enter(&dump_lock);
2846 	vattr.va_mask = AT_SIZE;
2847 	if ((error = VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL)) != 0) {
2848 		mutex_exit(&dump_lock);
2849 		return (error);
2850 	}
2851 
2852 	if (vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) {
2853 		mutex_exit(&dump_lock);
2854 		return (ENOSPC);
2855 	}
2856 
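	/*
	 * Round the size down to a DUMP_OFFSET boundary: for a
	 * power-of-two alignment y, (x & -y) clears the low-order
	 * bits of x.
	 */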
2857 	dumpvp_size = vattr.va_size & -DUMP_OFFSET;
2858 	mutex_exit(&dump_lock);
2859 	return (0);
2860 }
2861