/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define	MD_MODVER 1

#define	MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
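/* 20000 sectors: roughly 10 MB at the default 512-byte (DEV_BSIZE) sector size. */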
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int	mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
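/*
 * For example, with 4 kB pages and 64-bit pointers NINDIR is 512
 * entries per indirection node and NMASK is 0x1ff; dimension() below
 * then computes nshift, log2(NINDIR), as 9.
 */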
static int nshift;

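/*
 * Sector data for MD_MALLOC devices lives in a radix tree of "indir"
 * nodes: interior nodes (shift != 0) hold pointers to child nodes,
 * while leaf nodes (shift == 0) hold the per-sector values described
 * above mdstart_malloc().  Each level of a lookup consumes nshift
 * bits of the sector number.
 */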
struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

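/*
 * For example, with NINDIR == 512 a device of 1000000 sectors needs
 * two indirection layers (1000000 / 512 / 512 <= 512), so the top
 * node is created with shift == 2 * 9 == 18.
 */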
static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

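/*
 * The lip[] array records the path from the root so that nodes left
 * empty by a pruning write can be freed on the way back up; ten
 * levels at nine bits each covers more sectors than an off_t can
 * address.
 */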
static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

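/*
 * MD_MALLOC leaf values double as storage: 0 means an unallocated
 * (all-zero) sector, values 1 through 255 encode a sector filled
 * entirely with that byte (the MD_COMPRESS case), and anything
 * larger is a pointer to a malloc'ed sector buffer.
 */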
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we leave the buffer contents intact: for
	 * a write, anyway, the data is still valid.  The error itself
	 * is simply returned, and md_kthread() delivers it to GEOM via
	 * g_io_deliver().
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (i.e., first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (i.e., PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock_queues();
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
    m->wire_count, m->busy,
    m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
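	/* A pager error here most likely means swap space was exhausted. */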
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
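		/*
		 * g_handleattr_int() completes the bio itself; the -1
		 * pseudo-error records that so the bio is not delivered
		 * a second time below.
		 */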
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc, *sc2;
	int error, max = -1;

	*errp = 0;
	LIST_FOREACH(sc2, &md_softc_list, list) {
		if (unit == sc2->unit) {
			*errp = EBUSY;
			return (NULL);
		}
		if (unit == -1 && sc2->unit > max)
			max = sc2->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	if (mdio->md_base == 0)
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
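	/*
	 * MD_RESERVE preallocates every sector up front so that later
	 * writes cannot fail with ENOSPC; the 0x1ff alignment mask
	 * passed to uma_zcreate() above yields 512-byte-aligned sector
	 * buffers.
	 */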
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc.
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS.  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	int error, flags, vfslocked;

	error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL);
	if (error != 0)
		return (error);
	flags = FREAD|FWRITE;
	/*
	 * If the user specified that this is a read only device, unset the
	 * FWRITE mask before trying to open the backing store.
	 */
	if ((mdio->md_options & MD_READONLY) != 0)
		flags &= ~FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

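/*
 * Teardown order matters: wither the geom first so no new I/O can
 * arrive, then shut down the worker thread, and only then close or
 * release the backing store.
 */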
static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes, zero, and sizes that
	 * are not a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * Every ioctl handled below reads a struct md_ioctl, so the
	 * version number can be checked once up front.  If an ioctl
	 * that does not take an mdio is ever added, this check must
	 * move into the individual handlers, because the correct
	 * return value for an unknown ioctl is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}
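
/*
 * These ioctls are normally driven from userland by mdconfig(8), e.g.:
 *
 *	mdconfig -a -t swap -s 64m	# MDIOCATTACH
 *	mdconfig -l			# MDIOCLIST
 *	mdconfig -d -u 0		# MDIOCDETACH, unit 0
 */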

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	caddr_t c;
	u_char *ptr;
	char *name, *type;
	size_t len;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %zu bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		sx_xlock(&md_sx);
		md_preloaded(ptr, len);
		sx_xunlock(&md_sx);
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	const char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
}