/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define	MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * The preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

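/*
 * Sectors of MD_MALLOC devices are tracked in a radix tree of "indir"
 * nodes.  Each node holds NINDIR slots; interior slots point at child
 * nodes, while leaf slots hold either a pointer to a uma-allocated
 * sector (values above 255) or, with MD_COMPRESS, the single byte
 * value that fills the whole sector (values 0-255).  nshift is
 * log2(NINDIR), computed in g_md_init().
 *
 * Illustrative numbers (assuming 4 kB pages and 64-bit pointers):
 * NINDIR = 4096 / 8 = 512, so nshift = 9 and each level of the tree
 * covers nine bits of the sector number.
 */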
struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

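/*
 * Allocate and free a single indirection node.  new_indir() honors the
 * vm.md_malloc_wait sysctl: when it is set the allocation may sleep,
 * otherwise it can fail and the caller sees NULL.
 */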
static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

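/*
 * Recursively free an entire subtree: interior slots are child nodes,
 * leaf slots above 255 are uma-allocated sector buffers.
 */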
static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
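/*
 * Worked example (illustrative, with NINDIR = 512): a device of
 * 1,000,000 sectors needs two divisions to get at or below NINDIR
 * (1000000 -> 1953 -> 3), so layer = 2 and the top node gets
 * shift = 2 * nshift = 18.
 */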

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

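/*
 * GEOM access method: fail writes on read-only units and track whether
 * anybody currently has the provider open via sc->opencount.
 */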
377 g_md_access(struct g_provider *pp, int r, int w, int e)
378 {
379 	struct md_s *sc;
380 
381 	sc = pp->geom->softc;
382 	if (sc == NULL) {
383 		if (r <= 0 && w <= 0 && e <= 0)
384 			return (0);
385 		return (ENXIO);
386 	}
387 	r += pp->acr;
388 	w += pp->acw;
389 	e += pp->ace;
390 	if ((sc->flags & MD_READONLY) != 0 && w > 0)
391 		return (EROFS);
392 	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
393 		sc->opencount = 1;
394 	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
395 		sc->opencount = 0;
396 	}
397 	return (0);
398 }
399 
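/*
 * GEOM start method: queue the bio for the unit's worker thread and
 * wake it up; the actual I/O happens in md_kthread().
 */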
static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

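/*
 * I/O for malloc-backed devices.  Each sector is looked up in the indir
 * tree: 0 means an unwritten (all-zero) sector, values up to 255 mean a
 * sector filled with that byte (the MD_COMPRESS case), and anything
 * larger is a pointer to the sector's uma-allocated data.
 */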
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

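/*
 * I/O for preloaded images is a straight copy to or from the wired
 * kernel memory holding the image.
 */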
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

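/*
 * I/O for vnode-backed devices is translated into VOP_READ()/VOP_WRITE()
 * on the backing file; BIO_FLUSH becomes VOP_FSYNC() and BIO_DELETE is
 * emulated by writing zeroes from the shared zero_region.
 */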
static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}

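/*
 * I/O for swap-backed devices operates page by page on the unit's
 * anonymous VM object, mapping each page through a per-CPU sf_buf.
 */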
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (i.e., first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (i.e., PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
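	/*
	 * Illustrative example (assuming PAGE_SIZE = 4096): a bio with
	 * bio_offset = 6144 and bio_length = 8192 gives offs = 2048, a
	 * first page index of 1, lastp = 3 and lastend = 2048, so we
	 * touch the tail of page 1, all of page 2 and the head of page 3.
	 */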
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			if (len != PAGE_SIZE) {
				bzero((void *)(sf_buf_kva(sf) + offs), len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

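/*
 * Per-unit worker thread: takes bios off the queue, dispatches them to
 * the type-specific start routine and delivers the result, until
 * mddestroy() sets MD_SHUTDOWN.
 */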
static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

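/*
 * Allocate a softc and a unit number (the requested one, or the next
 * free one for MD_AUTOUNIT) and start the unit's worker thread; on
 * failure everything is undone and the error reported through *errp.
 */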
static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

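/*
 * Publish the unit: create its GEOM geom and provider and register a
 * devstat entry so its I/O shows up in iostat(8) and gstat(8).
 */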
static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	if (mdio->md_base == 0)
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}

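/*
 * Attach a malloc-backed device: set up the indir tree and the uma zone
 * sectors are allocated from; MD_RESERVE preallocates every sector up
 * front instead of allocating lazily on first write.
 */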
static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS.  XXX
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

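/*
 * Attach a vnode-backed device: look up and open the backing file,
 * validate it and hang it off the softc, honoring MD_READONLY.
 */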
static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags, vfslocked;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read-only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

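/*
 * Tear a unit down: wither the geom, stop the worker thread and release
 * whatever backing store the unit's type holds.
 */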
static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

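/*
 * Grow or shrink an existing unit.  Only vnode- and swap-backed devices
 * can be resized; for swap the object's swap reservation and page range
 * are adjusted to match the new size.
 */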
static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_LOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_LOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

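/*
 * Attach a swap-backed device: back the unit with an anonymous OBJT_SWAP
 * VM object; MD_RESERVE allocates the swap space up front.
 */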
static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and any size less than
	 * the size of a page.  Then round to a page.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

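/*
 * Worker for the control device ioctls; callers must hold md_sx, which
 * mdctlioctl() below takes care of.
 */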
static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * The version number is checked once, above, since every ioctl
	 * we currently implement reads an mdio.  The correct return
	 * value for an unknown ioctl is ENOIOCTL, not EINVAL, hence the
	 * default case below.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

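/*
 * Register a preloaded image as an md unit; unit 0 can become the root
 * file system when the kernel is built with MD_ROOT.
 */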
static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

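/*
 * GEOM class init: set up globals, attach any images preloaded by the
 * loader (type "md_image" or "mfs_root") and create the mdctl control
 * device.
 */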
static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr;
	char *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
			    MD_NAME, mdunits, name, len, ptr);
			sx_xlock(&md_sx);
			md_preloaded(ptr, len);
			sx_xunlock(&md_sx);
		}
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	const char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}
1467