xref: /freebsd/sys/dev/md/md.c (revision 5c831a5bd61576cacb48b39f8eeb47b92707a355)
/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object file.
 * Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
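
/*
 * An indir array entry is a pointer to the next level's indir node
 * while shift != 0.  At the leaf level (shift == 0) an entry encodes
 * one sector: 0 means the sector was never written and reads back as
 * zeros, 1..255 means the whole sector is filled with that byte value
 * (the MD_COMPRESS encoding), and anything larger is a pointer to a
 * sector buffer allocated from the per-device UMA zone.  This is why
 * destroy_indir() and mdstart_malloc() test entries against 255.
 */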

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	char label[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
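
/*
 * Example: with 4 KiB pages and 64-bit pointers, NINDIR is
 * 4096 / 8 = 512 entries per node and nshift is 9.  A 1 GiB device
 * with 512-byte sectors has 2097152 sectors, so the loop above runs
 * twice (2097152 -> 4096 -> 8) and the top node gets shift = 18;
 * s_read() and s_write() below then consume the sector number nine
 * bits at a time on the way down to the leaf.
 */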

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

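/*
 * GEOM access method: enforce the read-only flag and track in
 * sc->opencount whether the provider is open at all.
 */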
static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

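/*
 * Operations passed to md_malloc_move_ma() and md_malloc_move_vlist()
 * for each piece of an unmapped or vlist bio: zero it, fill it with a
 * constant byte, copy it to or from a malloc'ed sector, or compare all
 * of its bytes against the first one (used by MD_COMPRESS to detect
 * uniform sectors).
 */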
#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
		first = 0;
		first_read = 0;
		uc = ptr;
		mp1 = *mp;
		ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		bp->bio_resid = len;
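		/*
		 * An unmapped bio carries vm_page_t's instead of a
		 * usable KVA; map at most MAXPHYS bytes of them into
		 * the pbuf's KVA and loop back here until the whole
		 * request has been transferred.
		 */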
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}

static void
md_swap_page_free(vm_page_t m)
{

	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
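
	/*
	 * For example, with 4 KiB pages a request with bio_offset 6144
	 * and bio_length 8192 touches pages 1 through 3: offs = 2048,
	 * lastp = 3 and lastend = 2048, i.e. page 1 is used from 2048
	 * to 4096, page 2 in full, and page 3 from 0 to 2048.
	 */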

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL)
				pmap_zero_page(m);

			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}

			m->valid = VM_PAGE_BITS_ALL;
			if (m->dirty != VM_PAGE_BITS_ALL) {
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				md_swap_page_free(m);
				m = NULL;
			} else {
				/* Page is valid. */
				if (len != PAGE_SIZE) {
					pmap_zero_page_area(m, offs, len);
					if (m->dirty != VM_PAGE_BITS_ALL) {
						vm_page_dirty(m);
						vm_pager_page_unswapped(m);
					}
				} else {
					vm_pager_page_unswapped(m);
					md_swap_page_free(m);
					m = NULL;
				}
			}
		}
		if (m != NULL) {
			vm_page_xunbusy(m);
			vm_page_lock(m);
			vm_page_activate(m);
			vm_page_unlock(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

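/*
 * Per-device worker thread: dequeues bios, answers BIO_GETATTR
 * directly and hands everything else to the type-specific start
 * routine, then delivers the completed bio back to GEOM.
 */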
static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else if (g_handleattr_int(bp, "MNT::verified", isv))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE) |
	    ((mdio->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC | MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

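/*
 * Tear down a device: wither the GEOM provider, stop the worker
 * thread and release whatever backing store the type allocated.
 */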
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	if ((mdio->md_options & MD_VERIFY) != 0)
		return (EINVAL);
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_label != NULL)
			error = copyinstr(mdio->md_label, sc->label,
			    sizeof(sc->label), NULL);
		if (error != 0)
			goto err_after_new;
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl. Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
err_after_new:
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		error = 0;
		if (mdio->md_label != NULL) {
			error = copyout(sc->label, mdio->md_label,
			    strlen(sc->label) + 1);
		}
		if (sc->type == MD_VNODE ||
		    (sc->type == MD_PRELOAD && mdio->md_file != NULL))
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}
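
/*
 * For reference, a minimal userland sketch of driving MDIOCATTACH by
 * hand instead of via mdconfig(8).  This is illustrative only and not
 * part of this file; error handling is omitted and the 64 MB size is
 * an arbitrary example (MD_SWAP sizes must be a multiple of PAGE_SIZE):
 *
 *	#include <sys/param.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mdioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct md_ioctl mdio;
 *		int fd;
 *
 *		memset(&mdio, 0, sizeof(mdio));
 *		mdio.md_version = MDIOVERSION;
 *		mdio.md_type = MD_SWAP;
 *		mdio.md_mediasize = 64 * 1024 * 1024;
 *		mdio.md_options = MD_AUTOUNIT;
 *		fd = open("/dev/" MDCTL_NAME, O_RDWR);
 *		if (fd == -1 || ioctl(fd, MDIOCATTACH, &mdio) == -1)
 *			return (1);
 *		printf("attached md%u\n", mdio.md_unit);
 *		return (0);
 *	}
 */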

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	if (name != NULL)
		strlcpy(sc->file, name, sizeof(sc->file));
#if defined(MD_ROOT) && !defined(ROOTDEVNAME)
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
				sbuf_printf(sb, " file %s", mp->file);
			sbuf_printf(sb, " label %s", mp->label);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
				sbuf_printf(sb, "%s<file>", indent);
				g_conf_printf_escaped(sb, "%s", mp->file);
				sbuf_printf(sb, "</file>\n");
			}
			sbuf_printf(sb, "%s<label>", indent);
			g_conf_printf_escaped(sb, "%s", mp->label);
			sbuf_printf(sb, "</label>\n");
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}
1929