1 /*-
2  * ----------------------------------------------------------------------------
3  * "THE BEER-WARE LICENSE" (Revision 42):
4  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
5  * can do whatever you want with this stuff. If we meet some day, and you think
6  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
7  * ----------------------------------------------------------------------------
8  *
9  * $FreeBSD$
10  *
11  */
12 
13 /*-
14  * The following functions are based on the vn(4) driver: mdstart_swap(),
15  * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
16  * and as such are under the following copyright:
17  *
18  * Copyright (c) 1988 University of Utah.
19  * Copyright (c) 1990, 1993
20  *	The Regents of the University of California.  All rights reserved.
21  *
22  * This code is derived from software contributed to Berkeley by
23  * the Systems Programming Group of the University of Utah Computer
24  * Science Department.
25  *
26  * Redistribution and use in source and binary forms, with or without
27  * modification, are permitted provided that the following conditions
28  * are met:
29  * 1. Redistributions of source code must retain the above copyright
30  *    notice, this list of conditions and the following disclaimer.
31  * 2. Redistributions in binary form must reproduce the above copyright
32  *    notice, this list of conditions and the following disclaimer in the
33  *    documentation and/or other materials provided with the distribution.
34  * 4. Neither the name of the University nor the names of its contributors
35  *    may be used to endorse or promote products derived from this software
36  *    without specific prior written permission.
37  *
38  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48  * SUCH DAMAGE.
49  *
50  * from: Utah Hdr: vn.c 1.13 94/04/02
51  *
52  *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
53  * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
54  */
55 
56 #include "opt_geom.h"
57 #include "opt_md.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bio.h>
62 #include <sys/conf.h>
63 #include <sys/devicestat.h>
64 #include <sys/fcntl.h>
65 #include <sys/kernel.h>
66 #include <sys/kthread.h>
67 #include <sys/limits.h>
68 #include <sys/linker.h>
69 #include <sys/lock.h>
70 #include <sys/malloc.h>
71 #include <sys/mdioctl.h>
72 #include <sys/mount.h>
73 #include <sys/mutex.h>
74 #include <sys/sx.h>
75 #include <sys/namei.h>
76 #include <sys/proc.h>
77 #include <sys/queue.h>
78 #include <sys/sbuf.h>
79 #include <sys/sched.h>
80 #include <sys/sf_buf.h>
81 #include <sys/sysctl.h>
82 #include <sys/vnode.h>
83 
84 #include <geom/geom.h>
85 
86 #include <vm/vm.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pager.h>
90 #include <vm/swap_pager.h>
91 #include <vm/uma.h>
92 
93 #include <machine/vmparam.h>
94 
95 #define MD_MODVER 1
96 
97 #define	MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
98 #define	MD_EXITING	0x20000		/* Worker thread is exiting. */
99 
100 #ifndef MD_NSECT
101 #define MD_NSECT (10000 * 2)
102 #endif
103 
104 static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
105 static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");
106 
107 static int md_debug;
108 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
109     "Enable md(4) debug messages");
110 static int md_malloc_wait;
111 SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
112     "Allow malloc to wait for memory allocations");
113 
114 #if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
115 /*
116  * Preloaded image gets put here.
117  * Applications that patch the object with the image can determine
118  * the size by looking at the start and end markers (strings),
119  * so we want them contiguous.
120  */
121 static struct {
122 	u_char start[MD_ROOT_SIZE*1024];
123 	u_char end[128];
124 } mfs_root = {
125 	.start = "MFS Filesystem goes here",
126 	.end = "MFS Filesystem had better STOP here",
127 };
128 #endif
129 
130 static g_init_t g_md_init;
131 static g_fini_t g_md_fini;
132 static g_start_t g_md_start;
133 static g_access_t g_md_access;
134 static void g_md_dumpconf(struct sbuf *sb, const char *indent,
135     struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
136 
137 static int mdunits;
138 static struct cdev *status_dev = NULL;
139 static struct sx md_sx;
140 static struct unrhdr *md_uh;
141 
142 static d_ioctl_t mdctlioctl;
143 
144 static struct cdevsw mdctl_cdevsw = {
145 	.d_version =	D_VERSION,
146 	.d_ioctl =	mdctlioctl,
147 	.d_name =	MD_NAME,
148 };
149 
150 struct g_class g_md_class = {
151 	.name = "MD",
152 	.version = G_VERSION,
153 	.init = g_md_init,
154 	.fini = g_md_fini,
155 	.start = g_md_start,
156 	.access = g_md_access,
157 	.dumpconf = g_md_dumpconf,
158 };
159 
160 DECLARE_GEOM_CLASS(g_md_class, g_md);
161 
162 
163 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);
164 
165 #define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
166 #define NMASK	(NINDIR-1)
167 static int nshift;
168 
169 struct indir {
170 	uintptr_t	*array;
171 	u_int		total;
172 	u_int		used;
173 	u_int		shift;
174 };
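/*
 * An MD_MALLOC device keeps its sectors in a radix tree of "indir"
 * nodes with NINDIR slots each.  As a minimal worked example, assuming
 * 4 kB pages and 8-byte pointers (e.g. amd64): NINDIR is 4096 / 8 =
 * 512, NMASK is 511 and nshift (computed in g_md_init()) is
 * log2(512) = 9, so each level of the tree consumes 9 bits of the
 * sector number.  A leaf slot holds either 0 (the sector reads back
 * as all zeroes), a value from 1 to 255 (a compressed sector
 * consisting entirely of that byte value), or a pointer to a full
 * sector buffer allocated from sc->uma.
 */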
175 
176 struct md_s {
177 	int unit;
178 	LIST_ENTRY(md_s) list;
179 	struct bio_queue_head bio_queue;
180 	struct mtx queue_mtx;
181 	struct cdev *dev;
182 	enum md_types type;
183 	off_t mediasize;
184 	unsigned sectorsize;
185 	unsigned opencount;
186 	unsigned fwheads;
187 	unsigned fwsectors;
188 	unsigned flags;
189 	char name[20];
190 	struct proc *procp;
191 	struct g_geom *gp;
192 	struct g_provider *pp;
193 	int (*start)(struct md_s *sc, struct bio *bp);
194 	struct devstat *devstat;
195 
196 	/* MD_MALLOC related fields */
197 	struct indir *indir;
198 	uma_zone_t uma;
199 
200 	/* MD_PRELOAD related fields */
201 	u_char *pl_ptr;
202 	size_t pl_len;
203 
204 	/* MD_VNODE related fields */
205 	struct vnode *vnode;
206 	char file[PATH_MAX];
207 	struct ucred *cred;
208 
209 	/* MD_SWAP related fields */
210 	vm_object_t object;
211 };
212 
213 static struct indir *
214 new_indir(u_int shift)
215 {
216 	struct indir *ip;
217 
218 	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
219 	    | M_ZERO);
220 	if (ip == NULL)
221 		return (NULL);
222 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
223 	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
224 	if (ip->array == NULL) {
225 		free(ip, M_MD);
226 		return (NULL);
227 	}
228 	ip->total = NINDIR;
229 	ip->shift = shift;
230 	return (ip);
231 }
232 
233 static void
234 del_indir(struct indir *ip)
235 {
236 
237 	free(ip->array, M_MDSECT);
238 	free(ip, M_MD);
239 }
240 
241 static void
242 destroy_indir(struct md_s *sc, struct indir *ip)
243 {
244 	int i;
245 
246 	for (i = 0; i < NINDIR; i++) {
247 		if (!ip->array[i])
248 			continue;
249 		if (ip->shift)
250 			destroy_indir(sc, (struct indir*)(ip->array[i]));
251 		else if (ip->array[i] > 255)
252 			uma_zfree(sc->uma, (void *)(ip->array[i]));
253 	}
254 	del_indir(ip);
255 }
256 
257 /*
258  * This function does the math and allocates the top level "indir" structure
259  * for a device of "size" sectors.
260  */
261 
262 static struct indir *
263 dimension(off_t size)
264 {
265 	off_t rcnt;
266 	struct indir *ip;
267 	int layer;
268 
269 	rcnt = size;
270 	layer = 0;
271 	while (rcnt > NINDIR) {
272 		rcnt /= NINDIR;
273 		layer++;
274 	}
275 
276 	/*
277 	 * XXX: the top layer is probably not fully populated, so we allocate
278 	 * too much space for ip->array in here.
279 	 */
280 	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
281 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
282 	    M_MDSECT, M_WAITOK | M_ZERO);
283 	ip->total = NINDIR;
284 	ip->shift = layer * nshift;
285 	return (ip);
286 }
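/*
 * For example (again assuming NINDIR == 512), dimension(2000000)
 * iterates twice -- 2000000 / 512 == 3906, 3906 / 512 == 7 -- so
 * layer ends up as 2 and the returned top node gets shift 2 * 9 = 18.
 * Lookups then walk three levels: shift 18, shift 9 and a leaf.
 */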
287 
288 /*
289  * Read a given sector
290  */
291 
292 static uintptr_t
293 s_read(struct indir *ip, off_t offset)
294 {
295 	struct indir *cip;
296 	int idx;
297 	uintptr_t up;
298 
299 	if (md_debug > 1)
300 		printf("s_read(%jd)\n", (intmax_t)offset);
301 	up = 0;
302 	for (cip = ip; cip != NULL;) {
303 		if (cip->shift) {
304 			idx = (offset >> cip->shift) & NMASK;
305 			up = cip->array[idx];
306 			cip = (struct indir *)up;
307 			continue;
308 		}
309 		idx = offset & NMASK;
310 		return (cip->array[idx]);
311 	}
312 	return (0);
313 }
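/*
 * Continuing the example above, s_read() of sector 1000000 indexes
 * (1000000 >> 18) & 511 == 3 at the top node, (1000000 >> 9) & 511
 * == 417 at the middle node and 1000000 & 511 == 64 at the leaf.  A
 * NULL pointer anywhere along the way ends the loop and the sector
 * reads as zeroes.
 */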
314 
315 /*
316  * Write a given sector, prune the tree if the value is 0
317  */
318 
319 static int
320 s_write(struct indir *ip, off_t offset, uintptr_t ptr)
321 {
322 	struct indir *cip, *lip[10];
323 	int idx, li;
324 	uintptr_t up;
325 
326 	if (md_debug > 1)
327 		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
328 	up = 0;
329 	li = 0;
330 	cip = ip;
331 	for (;;) {
332 		lip[li++] = cip;
333 		if (cip->shift) {
334 			idx = (offset >> cip->shift) & NMASK;
335 			up = cip->array[idx];
336 			if (up != 0) {
337 				cip = (struct indir *)up;
338 				continue;
339 			}
340 			/* Allocate branch */
341 			cip->array[idx] =
342 			    (uintptr_t)new_indir(cip->shift - nshift);
343 			if (cip->array[idx] == 0)
344 				return (ENOSPC);
345 			cip->used++;
346 			up = cip->array[idx];
347 			cip = (struct indir *)up;
348 			continue;
349 		}
350 		/* leafnode */
351 		idx = offset & NMASK;
352 		up = cip->array[idx];
353 		if (up != 0)
354 			cip->used--;
355 		cip->array[idx] = ptr;
356 		if (ptr != 0)
357 			cip->used++;
358 		break;
359 	}
360 	if (cip->used != 0 || li == 1)
361 		return (0);
362 	li--;
363 	while (cip->used == 0 && cip != ip) {
364 		li--;
365 		idx = (offset >> lip[li]->shift) & NMASK;
366 		up = lip[li]->array[idx];
367 		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
368 		del_indir(cip);
369 		lip[li]->array[idx] = 0;
370 		lip[li]->used--;
371 		cip = lip[li];
372 	}
373 	return (0);
374 }
375 
376 
377 static int
378 g_md_access(struct g_provider *pp, int r, int w, int e)
379 {
380 	struct md_s *sc;
381 
382 	sc = pp->geom->softc;
383 	if (sc == NULL) {
384 		if (r <= 0 && w <= 0 && e <= 0)
385 			return (0);
386 		return (ENXIO);
387 	}
388 	r += pp->acr;
389 	w += pp->acw;
390 	e += pp->ace;
391 	if ((sc->flags & MD_READONLY) != 0 && w > 0)
392 		return (EROFS);
393 	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
394 		sc->opencount = 1;
395 	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
396 		sc->opencount = 0;
397 	}
398 	return (0);
399 }
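/*
 * GEOM passes access changes as deltas: r, w and e are the requested
 * changes to the read, write and exclusive counts, while pp->acr,
 * pp->acw and pp->ace hold the counts currently granted.  The sums
 * computed above are therefore the counts that would result, which is
 * what the MD_READONLY and opencount checks are interested in.
 */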
400 
401 static void
402 g_md_start(struct bio *bp)
403 {
404 	struct md_s *sc;
405 
406 	sc = bp->bio_to->geom->softc;
407 	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
408 		devstat_start_transaction_bio(sc->devstat, bp);
409 	mtx_lock(&sc->queue_mtx);
410 	bioq_disksort(&sc->bio_queue, bp);
411 	mtx_unlock(&sc->queue_mtx);
412 	wakeup(sc);
413 }
414 
415 static int
416 mdstart_malloc(struct md_s *sc, struct bio *bp)
417 {
418 	int i, error;
419 	u_char *dst;
420 	off_t secno, nsec, uc;
421 	uintptr_t sp, osp;
422 
423 	switch (bp->bio_cmd) {
424 	case BIO_READ:
425 	case BIO_WRITE:
426 	case BIO_DELETE:
427 		break;
428 	default:
429 		return (EOPNOTSUPP);
430 	}
431 
432 	nsec = bp->bio_length / sc->sectorsize;
433 	secno = bp->bio_offset / sc->sectorsize;
434 	dst = bp->bio_data;
435 	error = 0;
436 	while (nsec--) {
437 		osp = s_read(sc->indir, secno);
438 		if (bp->bio_cmd == BIO_DELETE) {
439 			if (osp != 0)
440 				error = s_write(sc->indir, secno, 0);
441 		} else if (bp->bio_cmd == BIO_READ) {
442 			if (osp == 0)
443 				bzero(dst, sc->sectorsize);
444 			else if (osp <= 255)
445 				memset(dst, osp, sc->sectorsize);
446 			else {
447 				bcopy((void *)osp, dst, sc->sectorsize);
448 				cpu_flush_dcache(dst, sc->sectorsize);
449 			}
450 			osp = 0;
451 		} else if (bp->bio_cmd == BIO_WRITE) {
452 			if (sc->flags & MD_COMPRESS) {
453 				uc = dst[0];
454 				for (i = 1; i < sc->sectorsize; i++)
455 					if (dst[i] != uc)
456 						break;
457 			} else {
458 				i = 0;
459 				uc = 0;
460 			}
461 			if (i == sc->sectorsize) {
462 				if (osp != uc)
463 					error = s_write(sc->indir, secno, uc);
464 			} else {
465 				if (osp <= 255) {
466 					sp = (uintptr_t)uma_zalloc(sc->uma,
467 					    md_malloc_wait ? M_WAITOK :
468 					    M_NOWAIT);
469 					if (sp == 0) {
470 						error = ENOSPC;
471 						break;
472 					}
473 					bcopy(dst, (void *)sp, sc->sectorsize);
474 					error = s_write(sc->indir, secno, sp);
475 				} else {
476 					bcopy(dst, (void *)osp, sc->sectorsize);
477 					osp = 0;
478 				}
479 			}
480 		} else {
481 			error = EOPNOTSUPP;
482 		}
483 		if (osp > 255)
484 			uma_zfree(sc->uma, (void*)osp);
485 		if (error != 0)
486 			break;
487 		secno++;
488 		dst += sc->sectorsize;
489 	}
490 	bp->bio_resid = 0;
491 	return (error);
492 }
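/*
 * A note on the write path above: with MD_COMPRESS set, the loop
 * scans each sector for a uniform byte value (uc = dst[0], with the
 * first mismatch breaking out).  Only if the scan reaches the end
 * (i == sc->sectorsize) is the sector stored as the bare value uc;
 * otherwise a real buffer is allocated from the UMA zone, or an
 * existing one (osp > 255) is overwritten in place.
 */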
493 
494 static int
495 mdstart_preload(struct md_s *sc, struct bio *bp)
496 {
497 
498 	switch (bp->bio_cmd) {
499 	case BIO_READ:
500 		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
501 		    bp->bio_length);
502 		cpu_flush_dcache(bp->bio_data, bp->bio_length);
503 		break;
504 	case BIO_WRITE:
505 		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
506 		    bp->bio_length);
507 		break;
508 	}
509 	bp->bio_resid = 0;
510 	return (0);
511 }
512 
513 static int
514 mdstart_vnode(struct md_s *sc, struct bio *bp)
515 {
516 	int error, vfslocked;
517 	struct uio auio;
518 	struct iovec aiov;
519 	struct mount *mp;
520 	struct vnode *vp;
521 	struct thread *td;
522 	off_t end, zerosize;
523 
524 	switch (bp->bio_cmd) {
525 	case BIO_READ:
526 	case BIO_WRITE:
527 	case BIO_DELETE:
528 	case BIO_FLUSH:
529 		break;
530 	default:
531 		return (EOPNOTSUPP);
532 	}
533 
534 	td = curthread;
535 	vp = sc->vnode;
536 
537 	/*
538 	 * VNODE I/O
539 	 *
540 	 * If an error occurs, we set BIO_ERROR but we do not set
541 	 * B_INVAL because (for a write anyway), the buffer is
542 	 * still valid.
543 	 */
544 
545 	if (bp->bio_cmd == BIO_FLUSH) {
546 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
547 		(void) vn_start_write(vp, &mp, V_WAIT);
548 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
549 		error = VOP_FSYNC(vp, MNT_WAIT, td);
550 		VOP_UNLOCK(vp, 0);
551 		vn_finished_write(mp);
552 		VFS_UNLOCK_GIANT(vfslocked);
553 		return (error);
554 	}
555 
556 	bzero(&auio, sizeof(auio));
557 
558 	/*
559 	 * Special case for BIO_DELETE.  On the surface, this is very
560 	 * similar to BIO_WRITE, except that we write from our own
561 	 * fixed-length buffer, so we have to loop.  The net result is
562 	 * that the two cases end up having very little in common.
563 	 */
564 	if (bp->bio_cmd == BIO_DELETE) {
565 		zerosize = ZERO_REGION_SIZE -
566 		    (ZERO_REGION_SIZE % sc->sectorsize);
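		/*
		 * zerosize is ZERO_REGION_SIZE rounded down to a whole
		 * number of sectors, so each full-sized VOP_WRITE below
		 * stays sector aligned.  (With a power-of-two sector
		 * size the modulo is typically 0 and nothing is
		 * trimmed.)
		 */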
567 		auio.uio_iov = &aiov;
568 		auio.uio_iovcnt = 1;
569 		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
570 		auio.uio_segflg = UIO_SYSSPACE;
571 		auio.uio_rw = UIO_WRITE;
572 		auio.uio_td = td;
573 		end = bp->bio_offset + bp->bio_length;
574 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
575 		(void) vn_start_write(vp, &mp, V_WAIT);
576 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
577 		error = 0;
578 		while (auio.uio_offset < end) {
579 			aiov.iov_base = __DECONST(void *, zero_region);
580 			aiov.iov_len = end - auio.uio_offset;
581 			if (aiov.iov_len > zerosize)
582 				aiov.iov_len = zerosize;
583 			auio.uio_resid = aiov.iov_len;
584 			error = VOP_WRITE(vp, &auio,
585 			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
586 			if (error != 0)
587 				break;
588 		}
589 		VOP_UNLOCK(vp, 0);
590 		vn_finished_write(mp);
591 		bp->bio_resid = end - auio.uio_offset;
592 		VFS_UNLOCK_GIANT(vfslocked);
593 		return (error);
594 	}
595 
596 	aiov.iov_base = bp->bio_data;
597 	aiov.iov_len = bp->bio_length;
598 	auio.uio_iov = &aiov;
599 	auio.uio_iovcnt = 1;
600 	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
601 	auio.uio_segflg = UIO_SYSSPACE;
602 	if (bp->bio_cmd == BIO_READ)
603 		auio.uio_rw = UIO_READ;
604 	else if (bp->bio_cmd == BIO_WRITE)
605 		auio.uio_rw = UIO_WRITE;
606 	else
607 		panic("wrong BIO_OP in mdstart_vnode");
608 	auio.uio_resid = bp->bio_length;
609 	auio.uio_td = td;
610 	/*
611 	 * When reading, set IO_DIRECT to try to avoid double-caching
612 	 * the data.  When writing, IO_DIRECT is not optimal.
613 	 */
614 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
615 	if (bp->bio_cmd == BIO_READ) {
616 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
617 		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
618 		VOP_UNLOCK(vp, 0);
619 	} else {
620 		(void) vn_start_write(vp, &mp, V_WAIT);
621 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
622 		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
623 		    sc->cred);
624 		VOP_UNLOCK(vp, 0);
625 		vn_finished_write(mp);
626 	}
627 	VFS_UNLOCK_GIANT(vfslocked);
628 	bp->bio_resid = auio.uio_resid;
629 	return (error);
630 }
631 
632 static int
633 mdstart_swap(struct md_s *sc, struct bio *bp)
634 {
635 	struct sf_buf *sf;
636 	int rv, offs, len, lastend;
637 	vm_pindex_t i, lastp;
638 	vm_page_t m;
639 	u_char *p;
640 
641 	switch (bp->bio_cmd) {
642 	case BIO_READ:
643 	case BIO_WRITE:
644 	case BIO_DELETE:
645 		break;
646 	default:
647 		return (EOPNOTSUPP);
648 	}
649 
650 	p = bp->bio_data;
651 
652 	/*
653 	 * offs is the offset at which to start operating on the
654  * next (i.e., first) page.  lastp is the last page on
655  * which we're going to operate.  lastend is the ending
656  * position within that last page (i.e., PAGE_SIZE if
657 	 * we're operating on complete aligned pages).
658 	 */
659 	offs = bp->bio_offset % PAGE_SIZE;
660 	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
661 	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
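	/*
	 * For instance, an 8192-byte request at offset 512 with 4 kB
	 * pages gives offs = 512, lastp = 2 and lastend = 512: the
	 * loop below then touches 3584 bytes of page 0, all of page 1
	 * and the first 512 bytes of page 2.
	 */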
662 
663 	rv = VM_PAGER_OK;
664 	VM_OBJECT_LOCK(sc->object);
665 	vm_object_pip_add(sc->object, 1);
666 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
667 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
668 
669 		m = vm_page_grab(sc->object, i,
670 		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
671 		VM_OBJECT_UNLOCK(sc->object);
672 		sched_pin();
673 		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
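		/*
		 * SFB_CPUPRIVATE maps the page through a CPU-private
		 * sf_buf, which is only valid while this thread stays
		 * on the same CPU -- hence the surrounding sched_pin()
		 * and sched_unpin() calls.
		 */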
674 		VM_OBJECT_LOCK(sc->object);
675 		if (bp->bio_cmd == BIO_READ) {
676 			if (m->valid != VM_PAGE_BITS_ALL)
677 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
678 			if (rv == VM_PAGER_ERROR) {
679 				sf_buf_free(sf);
680 				sched_unpin();
681 				vm_page_wakeup(m);
682 				break;
683 			}
684 			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
685 			cpu_flush_dcache(p, len);
686 		} else if (bp->bio_cmd == BIO_WRITE) {
687 			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
688 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
689 			if (rv == VM_PAGER_ERROR) {
690 				sf_buf_free(sf);
691 				sched_unpin();
692 				vm_page_wakeup(m);
693 				break;
694 			}
695 			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
696 			m->valid = VM_PAGE_BITS_ALL;
697 		} else if (bp->bio_cmd == BIO_DELETE) {
698 			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
699 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
700 			if (rv == VM_PAGER_ERROR) {
701 				sf_buf_free(sf);
702 				sched_unpin();
703 				vm_page_wakeup(m);
704 				break;
705 			}
706 			if (len != PAGE_SIZE) {
707 				bzero((void *)(sf_buf_kva(sf) + offs), len);
708 				vm_page_clear_dirty(m, offs, len);
709 				m->valid = VM_PAGE_BITS_ALL;
710 			} else
711 				vm_pager_page_unswapped(m);
712 		}
713 		sf_buf_free(sf);
714 		sched_unpin();
715 		vm_page_wakeup(m);
716 		vm_page_lock(m);
717 		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
718 			vm_page_free(m);
719 		else
720 			vm_page_activate(m);
721 		vm_page_unlock(m);
722 		if (bp->bio_cmd == BIO_WRITE)
723 			vm_page_dirty(m);
724 
725 		/* Actions on further pages start at offset 0 */
726 		p += PAGE_SIZE - offs;
727 		offs = 0;
728 #if 0
729 if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
730 printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
731     m->wire_count, m->busy,
732     m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
733 #endif
734 	}
735 	vm_object_pip_subtract(sc->object, 1);
736 	VM_OBJECT_UNLOCK(sc->object);
737 	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
738 }
739 
740 static void
741 md_kthread(void *arg)
742 {
743 	struct md_s *sc;
744 	struct bio *bp;
745 	int error;
746 
747 	sc = arg;
748 	thread_lock(curthread);
749 	sched_prio(curthread, PRIBIO);
750 	thread_unlock(curthread);
751 	if (sc->type == MD_VNODE)
752 		curthread->td_pflags |= TDP_NORUNNINGBUF;
753 
754 	for (;;) {
755 		mtx_lock(&sc->queue_mtx);
756 		if (sc->flags & MD_SHUTDOWN) {
757 			sc->flags |= MD_EXITING;
758 			mtx_unlock(&sc->queue_mtx);
759 			kproc_exit(0);
760 		}
761 		bp = bioq_takefirst(&sc->bio_queue);
762 		if (!bp) {
763 			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
764 			continue;
765 		}
766 		mtx_unlock(&sc->queue_mtx);
767 		if (bp->bio_cmd == BIO_GETATTR) {
768 			if ((sc->fwsectors && sc->fwheads &&
769 			    (g_handleattr_int(bp, "GEOM::fwsectors",
770 			    sc->fwsectors) ||
771 			    g_handleattr_int(bp, "GEOM::fwheads",
772 			    sc->fwheads))) ||
773 			    g_handleattr_int(bp, "GEOM::candelete", 1))
774 				error = -1;
775 			else
776 				error = EOPNOTSUPP;
777 		} else {
778 			error = sc->start(sc, bp);
779 		}
780 
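		/*
		 * By convention an error of -1 means the bio has
		 * already been completed: g_handleattr_int() delivers
		 * the bio itself when it handles an attribute, so it
		 * must not be passed to g_io_deliver() a second time.
		 */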
781 		if (error != -1) {
782 			bp->bio_completed = bp->bio_length;
783 			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
784 				devstat_end_transaction_bio(sc->devstat, bp);
785 			g_io_deliver(bp, error);
786 		}
787 	}
788 }
789 
790 static struct md_s *
791 mdfind(int unit)
792 {
793 	struct md_s *sc;
794 
795 	LIST_FOREACH(sc, &md_softc_list, list) {
796 		if (sc->unit == unit)
797 			break;
798 	}
799 	return (sc);
800 }
801 
802 static struct md_s *
803 mdnew(int unit, int *errp, enum md_types type)
804 {
805 	struct md_s *sc;
806 	int error;
807 
808 	*errp = 0;
809 	if (unit == -1)
810 		unit = alloc_unr(md_uh);
811 	else
812 		unit = alloc_unr_specific(md_uh, unit);
813 
814 	if (unit == -1) {
815 		*errp = EBUSY;
816 		return (NULL);
817 	}
818 
819 	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
820 	sc->type = type;
821 	bioq_init(&sc->bio_queue);
822 	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
823 	sc->unit = unit;
824 	sprintf(sc->name, "md%d", unit);
825 	LIST_INSERT_HEAD(&md_softc_list, sc, list);
826 	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
827 	if (error == 0)
828 		return (sc);
829 	LIST_REMOVE(sc, list);
830 	mtx_destroy(&sc->queue_mtx);
831 	free_unr(md_uh, sc->unit);
832 	free(sc, M_MD);
833 	*errp = error;
834 	return (NULL);
835 }
836 
837 static void
838 mdinit(struct md_s *sc)
839 {
840 	struct g_geom *gp;
841 	struct g_provider *pp;
842 
843 	g_topology_lock();
844 	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
845 	gp->softc = sc;
846 	pp = g_new_providerf(gp, "md%d", sc->unit);
847 	pp->mediasize = sc->mediasize;
848 	pp->sectorsize = sc->sectorsize;
849 	sc->gp = gp;
850 	sc->pp = pp;
851 	g_error_provider(pp, 0);
852 	g_topology_unlock();
853 	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
854 	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
855 }
856 
857 /*
858  * XXX: we should check that the range they feed us is mapped.
859  * XXX: we should implement read-only.
860  */
861 
862 static int
863 mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
864 {
865 
866 	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
867 		return (EINVAL);
868 	if (mdio->md_base == 0)
869 		return (EINVAL);
870 	sc->flags = mdio->md_options & MD_FORCE;
871 	/* Cast to pointer size, then to pointer to avoid warning */
872 	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
873 	sc->pl_len = (size_t)sc->mediasize;
874 	return (0);
875 }
876 
877 
878 static int
879 mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
880 {
881 	uintptr_t sp;
882 	int error;
883 	off_t u;
884 
885 	error = 0;
886 	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
887 		return (EINVAL);
888 	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
889 		return (EINVAL);
890 	/* Compression doesn't make sense if we have reserved space */
891 	if (mdio->md_options & MD_RESERVE)
892 		mdio->md_options &= ~MD_COMPRESS;
893 	if (mdio->md_fwsectors != 0)
894 		sc->fwsectors = mdio->md_fwsectors;
895 	if (mdio->md_fwheads != 0)
896 		sc->fwheads = mdio->md_fwheads;
897 	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
898 	sc->indir = dimension(sc->mediasize / sc->sectorsize);
899 	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
900 	    0x1ff, 0);
901 	if (mdio->md_options & MD_RESERVE) {
902 		off_t nsectors;
903 
904 		nsectors = sc->mediasize / sc->sectorsize;
905 		for (u = 0; u < nsectors; u++) {
906 			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
907 			    M_WAITOK : M_NOWAIT) | M_ZERO);
908 			if (sp != 0)
909 				error = s_write(sc->indir, u, sp);
910 			else
911 				error = ENOMEM;
912 			if (error != 0)
913 				break;
914 		}
915 	}
916 	return (error);
917 }
918 
919 
920 static int
921 mdsetcred(struct md_s *sc, struct ucred *cred)
922 {
923 	char *tmpbuf;
924 	int error = 0;
925 
926 	/*
927 	 * Set credentials in our softc
928 	 */
929 
930 	if (sc->cred)
931 		crfree(sc->cred);
932 	sc->cred = crhold(cred);
933 
934 	/*
935 	 * Horrible kludge to establish credentials for NFS.  XXX
936 	 */
937 
938 	if (sc->vnode) {
939 		struct uio auio;
940 		struct iovec aiov;
941 
942 		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
943 		bzero(&auio, sizeof(auio));
944 
945 		aiov.iov_base = tmpbuf;
946 		aiov.iov_len = sc->sectorsize;
947 		auio.uio_iov = &aiov;
948 		auio.uio_iovcnt = 1;
949 		auio.uio_offset = 0;
950 		auio.uio_rw = UIO_READ;
951 		auio.uio_segflg = UIO_SYSSPACE;
952 		auio.uio_resid = aiov.iov_len;
953 		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
954 		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
955 		VOP_UNLOCK(sc->vnode, 0);
956 		free(tmpbuf, M_TEMP);
957 	}
958 	return (error);
959 }
960 
961 static int
962 mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
963 {
964 	struct vattr vattr;
965 	struct nameidata nd;
966 	char *fname;
967 	int error, flags, vfslocked;
968 
969 	/*
970 	 * Kernel-originated requests must have the filename appended
971 	 * to the mdio structure to protect against malicious software.
972 	 */
973 	fname = mdio->md_file;
974 	if ((void *)fname != (void *)(mdio + 1)) {
975 		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
976 		if (error != 0)
977 			return (error);
978 	} else
979 		strlcpy(sc->file, fname, sizeof(sc->file));
980 
981 	/*
982 	 * If the user specified that this is a read only device, don't
983 	 * set the FWRITE mask before trying to open the backing store.
984 	 */
985 	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
986 	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
987 	error = vn_open(&nd, &flags, 0, NULL);
988 	if (error != 0)
989 		return (error);
990 	vfslocked = NDHASGIANT(&nd);
991 	NDFREE(&nd, NDF_ONLY_PNBUF);
992 	if (nd.ni_vp->v_type != VREG) {
993 		error = EINVAL;
994 		goto bad;
995 	}
996 	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
997 	if (error != 0)
998 		goto bad;
999 	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
1000 		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
1001 		if (nd.ni_vp->v_iflag & VI_DOOMED) {
1002 			/* Forced unmount. */
1003 			error = EBADF;
1004 			goto bad;
1005 		}
1006 	}
1007 	nd.ni_vp->v_vflag |= VV_MD;
1008 	VOP_UNLOCK(nd.ni_vp, 0);
1009 
1010 	if (mdio->md_fwsectors != 0)
1011 		sc->fwsectors = mdio->md_fwsectors;
1012 	if (mdio->md_fwheads != 0)
1013 		sc->fwheads = mdio->md_fwheads;
1014 	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
1015 	if (!(flags & FWRITE))
1016 		sc->flags |= MD_READONLY;
1017 	sc->vnode = nd.ni_vp;
1018 
1019 	error = mdsetcred(sc, td->td_ucred);
1020 	if (error != 0) {
1021 		sc->vnode = NULL;
1022 		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
1023 		nd.ni_vp->v_vflag &= ~VV_MD;
1024 		goto bad;
1025 	}
1026 	VFS_UNLOCK_GIANT(vfslocked);
1027 	return (0);
1028 bad:
1029 	VOP_UNLOCK(nd.ni_vp, 0);
1030 	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
1031 	VFS_UNLOCK_GIANT(vfslocked);
1032 	return (error);
1033 }
1034 
1035 static int
1036 mddestroy(struct md_s *sc, struct thread *td)
1037 {
1038 	int vfslocked;
1039 
1040 	if (sc->gp) {
1041 		sc->gp->softc = NULL;
1042 		g_topology_lock();
1043 		g_wither_geom(sc->gp, ENXIO);
1044 		g_topology_unlock();
1045 		sc->gp = NULL;
1046 		sc->pp = NULL;
1047 	}
1048 	if (sc->devstat) {
1049 		devstat_remove_entry(sc->devstat);
1050 		sc->devstat = NULL;
1051 	}
1052 	mtx_lock(&sc->queue_mtx);
1053 	sc->flags |= MD_SHUTDOWN;
1054 	wakeup(sc);
1055 	while (!(sc->flags & MD_EXITING))
1056 		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
1057 	mtx_unlock(&sc->queue_mtx);
1058 	mtx_destroy(&sc->queue_mtx);
1059 	if (sc->vnode != NULL) {
1060 		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
1061 		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
1062 		sc->vnode->v_vflag &= ~VV_MD;
1063 		VOP_UNLOCK(sc->vnode, 0);
1064 		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
1065 		    FREAD : (FREAD|FWRITE), sc->cred, td);
1066 		VFS_UNLOCK_GIANT(vfslocked);
1067 	}
1068 	if (sc->cred != NULL)
1069 		crfree(sc->cred);
1070 	if (sc->object != NULL)
1071 		vm_object_deallocate(sc->object);
1072 	if (sc->indir)
1073 		destroy_indir(sc, sc->indir);
1074 	if (sc->uma)
1075 		uma_zdestroy(sc->uma);
1076 
1077 	LIST_REMOVE(sc, list);
1078 	free_unr(md_uh, sc->unit);
1079 	free(sc, M_MD);
1080 	return (0);
1081 }
1082 
1083 static int
1084 mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
1085 {
1086 	vm_ooffset_t npage;
1087 	int error;
1088 
1089 	/*
1090 	 * Range check.  Disallow zero-length devices and sizes that are
1091 	 * not an exact multiple of the page size.
1092 	 */
1093 	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
1094 		return (EDOM);
1095 
1096 	/*
1097 	 * Allocate an OBJT_SWAP object.
1098 	 *
1099 	 * Note the truncation.
1100 	 */
1101 
1102 	npage = mdio->md_mediasize / PAGE_SIZE;
1103 	if (mdio->md_fwsectors != 0)
1104 		sc->fwsectors = mdio->md_fwsectors;
1105 	if (mdio->md_fwheads != 0)
1106 		sc->fwheads = mdio->md_fwheads;
1107 	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
1108 	    VM_PROT_DEFAULT, 0, td->td_ucred);
1109 	if (sc->object == NULL)
1110 		return (ENOMEM);
1111 	sc->flags = mdio->md_options & MD_FORCE;
1112 	if (mdio->md_options & MD_RESERVE) {
1113 		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
1114 			error = EDOM;
1115 			goto finish;
1116 		}
1117 	}
1118 	error = mdsetcred(sc, td->td_ucred);
1119  finish:
1120 	if (error != 0) {
1121 		vm_object_deallocate(sc->object);
1122 		sc->object = NULL;
1123 	}
1124 	return (error);
1125 }
1126 
1127 
1128 static int
1129 xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1130 {
1131 	struct md_ioctl *mdio;
1132 	struct md_s *sc;
1133 	int error, i;
1134 
1135 	if (md_debug)
1136 		printf("mdctlioctl(%s %lx %p %x %p)\n",
1137 			devtoname(dev), cmd, addr, flags, td);
1138 
1139 	mdio = (struct md_ioctl *)addr;
1140 	if (mdio->md_version != MDIOVERSION)
1141 		return (EINVAL);
1142 
1143 	/*
1144 	 * The version number is checked once, above, because every
1145 	 * ioctl we currently support reads an mdio.  If an ioctl that
1146 	 * does not is ever added, the check must move into the
1147 	 * individual handlers; note that the correct return value for
1148 	 * an unknown ioctl is ENOIOCTL, not EINVAL.
1149 	 */
1150 	error = 0;
1151 	switch (cmd) {
1152 	case MDIOCATTACH:
1153 		switch (mdio->md_type) {
1154 		case MD_MALLOC:
1155 		case MD_PRELOAD:
1156 		case MD_VNODE:
1157 		case MD_SWAP:
1158 			break;
1159 		default:
1160 			return (EINVAL);
1161 		}
1162 		if (mdio->md_options & MD_AUTOUNIT)
1163 			sc = mdnew(-1, &error, mdio->md_type);
1164 		else {
1165 			if (mdio->md_unit > INT_MAX)
1166 				return (EINVAL);
1167 			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
1168 		}
1169 		if (sc == NULL)
1170 			return (error);
1171 		if (mdio->md_options & MD_AUTOUNIT)
1172 			mdio->md_unit = sc->unit;
1173 		sc->mediasize = mdio->md_mediasize;
1174 		if (mdio->md_sectorsize == 0)
1175 			sc->sectorsize = DEV_BSIZE;
1176 		else
1177 			sc->sectorsize = mdio->md_sectorsize;
1178 		error = EDOOFUS;
1179 		switch (sc->type) {
1180 		case MD_MALLOC:
1181 			sc->start = mdstart_malloc;
1182 			error = mdcreate_malloc(sc, mdio);
1183 			break;
1184 		case MD_PRELOAD:
1185 			sc->start = mdstart_preload;
1186 			error = mdcreate_preload(sc, mdio);
1187 			break;
1188 		case MD_VNODE:
1189 			sc->start = mdstart_vnode;
1190 			error = mdcreate_vnode(sc, mdio, td);
1191 			break;
1192 		case MD_SWAP:
1193 			sc->start = mdstart_swap;
1194 			error = mdcreate_swap(sc, mdio, td);
1195 			break;
1196 		}
1197 		if (error != 0) {
1198 			mddestroy(sc, td);
1199 			return (error);
1200 		}
1201 
1202 		/* Prune off any residual fractional sector */
1203 		i = sc->mediasize % sc->sectorsize;
1204 		sc->mediasize -= i;
1205 
1206 		mdinit(sc);
1207 		return (0);
1208 	case MDIOCDETACH:
1209 		if (mdio->md_mediasize != 0 ||
1210 		    (mdio->md_options & ~MD_FORCE) != 0)
1211 			return (EINVAL);
1212 
1213 		sc = mdfind(mdio->md_unit);
1214 		if (sc == NULL)
1215 			return (ENOENT);
1216 		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
1217 		    !(mdio->md_options & MD_FORCE))
1218 			return (EBUSY);
1219 		return (mddestroy(sc, td));
1220 	case MDIOCQUERY:
1221 		sc = mdfind(mdio->md_unit);
1222 		if (sc == NULL)
1223 			return (ENOENT);
1224 		mdio->md_type = sc->type;
1225 		mdio->md_options = sc->flags;
1226 		mdio->md_mediasize = sc->mediasize;
1227 		mdio->md_sectorsize = sc->sectorsize;
1228 		if (sc->type == MD_VNODE)
1229 			error = copyout(sc->file, mdio->md_file,
1230 			    strlen(sc->file) + 1);
1231 		return (error);
1232 	case MDIOCLIST:
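		/*
		 * Return the configured units in md_pad[]: md_pad[0] is
		 * the number of units reported, md_pad[1] onward are
		 * the unit numbers, and slot MDNPAD - 1 is set to -1
		 * when there are too many devices to fit.
		 */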
1233 		i = 1;
1234 		LIST_FOREACH(sc, &md_softc_list, list) {
1235 			if (i == MDNPAD - 1)
1236 				mdio->md_pad[i] = -1;
1237 			else
1238 				mdio->md_pad[i++] = sc->unit;
1239 		}
1240 		mdio->md_pad[0] = i - 1;
1241 		return (0);
1242 	default:
1243 		return (ENOIOCTL);
1244 	}
1245 }
1246 
1247 static int
1248 mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1249 {
1250 	int error;
1251 
1252 	sx_xlock(&md_sx);
1253 	error = xmdctlioctl(dev, cmd, addr, flags, td);
1254 	sx_xunlock(&md_sx);
1255 	return (error);
1256 }
1257 
1258 static void
1259 md_preloaded(u_char *image, size_t length)
1260 {
1261 	struct md_s *sc;
1262 	int error;
1263 
1264 	sc = mdnew(-1, &error, MD_PRELOAD);
1265 	if (sc == NULL)
1266 		return;
1267 	sc->mediasize = length;
1268 	sc->sectorsize = DEV_BSIZE;
1269 	sc->pl_ptr = image;
1270 	sc->pl_len = length;
1271 	sc->start = mdstart_preload;
1272 #ifdef MD_ROOT
1273 	if (sc->unit == 0)
1274 		rootdevnames[0] = "ufs:/dev/md0";
1275 #endif
1276 	mdinit(sc);
1277 }
1278 
1279 static void
1280 g_md_init(struct g_class *mp __unused)
1281 {
1282 	caddr_t mod;
1283 	u_char *ptr, *name, *type;
1284 	unsigned len;
1285 	int i;
1286 
1287 	/* figure out log2(NINDIR) */
1288 	for (i = NINDIR, nshift = -1; i; nshift++)
1289 		i >>= 1;
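	/*
	 * E.g. with 4 kB pages and 8-byte pointers NINDIR is 512, and
	 * the loop above leaves nshift == 9.
	 */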
1290 
1291 	mod = NULL;
1292 	sx_init(&md_sx, "MD config lock");
1293 	g_topology_unlock();
1294 	md_uh = new_unrhdr(0, INT_MAX, NULL);
1295 #ifdef MD_ROOT_SIZE
1296 	sx_xlock(&md_sx);
1297 	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
1298 	sx_xunlock(&md_sx);
1299 #endif
1300 	/* XXX: are preload_* static or do they need Giant ? */
1301 	while ((mod = preload_search_next_name(mod)) != NULL) {
1302 		name = (char *)preload_search_info(mod, MODINFO_NAME);
1303 		if (name == NULL)
1304 			continue;
1305 		type = (char *)preload_search_info(mod, MODINFO_TYPE);
1306 		if (type == NULL)
1307 			continue;
1308 		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
1309 			continue;
1310 		ptr = preload_fetch_addr(mod);
1311 		len = preload_fetch_size(mod);
1312 		if (ptr != NULL && len != 0) {
1313 			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
1314 			    MD_NAME, mdunits, name, len, ptr);
1315 			sx_xlock(&md_sx);
1316 			md_preloaded(ptr, len);
1317 			sx_xunlock(&md_sx);
1318 		}
1319 	}
1320 	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
1321 	    0600, MDCTL_NAME);
1322 	g_topology_lock();
1323 }
1324 
1325 static void
1326 g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1327     struct g_consumer *cp __unused, struct g_provider *pp)
1328 {
1329 	struct md_s *mp;
1330 	char *type;
1331 
1332 	mp = gp->softc;
1333 	if (mp == NULL)
1334 		return;
1335 
1336 	switch (mp->type) {
1337 	case MD_MALLOC:
1338 		type = "malloc";
1339 		break;
1340 	case MD_PRELOAD:
1341 		type = "preload";
1342 		break;
1343 	case MD_VNODE:
1344 		type = "vnode";
1345 		break;
1346 	case MD_SWAP:
1347 		type = "swap";
1348 		break;
1349 	default:
1350 		type = "unknown";
1351 		break;
1352 	}
1353 
1354 	if (pp != NULL) {
1355 		if (indent == NULL) {
1356 			sbuf_printf(sb, " u %d", mp->unit);
1357 			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
1358 			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
1359 			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
1360 			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
1361 			sbuf_printf(sb, " t %s", type);
1362 			if (mp->type == MD_VNODE && mp->vnode != NULL)
1363 				sbuf_printf(sb, " file %s", mp->file);
1364 		} else {
1365 			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
1366 			    mp->unit);
1367 			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
1368 			    indent, (uintmax_t) mp->sectorsize);
1369 			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
1370 			    indent, (uintmax_t) mp->fwheads);
1371 			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
1372 			    indent, (uintmax_t) mp->fwsectors);
1373 			sbuf_printf(sb, "%s<length>%ju</length>\n",
1374 			    indent, (uintmax_t) mp->mediasize);
1375 			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
1376 			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
1377 			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
1378 			    (mp->flags & MD_READONLY) == 0 ? "read-write":
1379 			    "read-only");
1380 			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1381 			    type);
1382 			if (mp->type == MD_VNODE && mp->vnode != NULL)
1383 				sbuf_printf(sb, "%s<file>%s</file>\n",
1384 				    indent, mp->file);
1385 		}
1386 	}
1387 }
1388 
1389 static void
1390 g_md_fini(struct g_class *mp __unused)
1391 {
1392 
1393 	sx_destroy(&md_sx);
1394 	if (status_dev != NULL)
1395 		destroy_dev(status_dev);
1396 	delete_unrhdr(md_uh);
1397 }
1398