/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define	MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
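/*
 * nshift is log2(NINDIR), computed once in g_md_init(); each level of
 * the indir tree resolves nshift bits of the sector number.
 */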
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
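/*
 * A worked example, assuming 4 KB pages and 8-byte pointers (so
 * NINDIR == 512 and nshift == 9): a device of 2^21 sectors leaves the
 * loop below with layer == 2, giving the root a shift of 18; s_read()
 * and s_write() then resolve a sector number 9 bits at a time, root
 * first.
 */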

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
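/*
 * Each node counts its non-zero slots in "used"; when storing a zero
 * empties a leaf, the chain of now-empty nodes is freed bottom-up
 * through the lip[] stack, stopping short of the root.
 */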

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

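/*
 * GEOM entry point for I/O: g_md_start() only queues the bio and wakes
 * the unit's worker (md_kthread), which performs the actual transfer.
 */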
static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

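/*
 * md_malloc_move() performs one of the above operations between a
 * sector in the malloc backing store and the pages of an unmapped
 * (BIO_UNMAPPED) bio, mapping one page at a time through per-CPU
 * sf_bufs while the thread is pinned.
 */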
static int
md_malloc_move(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
		first = 0;
		first_read = 0;
		uc = ptr;
		mp1 = *mp;
		ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
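	/*
	 * Each leaf slot in the indir tree holds one of three things:
	 * 0 (unallocated; the sector reads back as zeroes), a value in
	 * 1..255 (an MD_COMPRESS sector filled entirely with that
	 * byte), or a pointer to a full sector allocated from the UMA
	 * zone.
	 */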
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	default:
		/* Do not report success for commands we do not handle. */
		return (EOPNOTSUPP);
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
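	/*
	 * zerosize is ZERO_REGION_SIZE rounded down to a multiple of
	 * the sector size, so each chunk written from the shared
	 * zero_region covers a whole number of sectors.
	 */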
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		return (error);
	}

	KASSERT(bp->bio_length <= MAXPHYS, ("bio_length %jd",
	    (uintmax_t)bp->bio_length));
	if ((bp->bio_flags & BIO_UNMAPPED) == 0) {
		pb = NULL;
		aiov.iov_base = bp->bio_data;
	} else {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		pmap_qenter((vm_offset_t)pb->b_data, bp->bio_ma, bp->bio_ma_n);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    bp->bio_ma_offset);
	}
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pmap_qremove((vm_offset_t)pb->b_data, bp->bio_ma_n);
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & BIO_UNMAPPED) == 0 ? 0 : bp->bio_ma_offset;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
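	/*
	 * For example, with 4 KB pages, a 1536 byte request at offset
	 * 3584 gives offs == 3584, lastp == 1 and lastend == 1024: the
	 * first iteration covers the trailing 512 bytes of page 0 and
	 * the second the leading 1024 bytes of page 1.
	 */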
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		vm_page_xunbusy(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

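		/*
		 * An "error" of -1 means the bio was already delivered
		 * by g_handleattr_int() above, so it must not be
		 * completed again here.
		 */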
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
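	/*
	 * With MD_RESERVE, every sector is allocated (zero-filled) up
	 * front so the whole mediasize is committed immediately;
	 * otherwise sectors are allocated lazily on first write.
	 */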
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credits in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and any size that is
	 * not a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * The version number is checked above, before dispatching, since
	 * every ioctl we currently implement reads an mdio.  Note that
	 * the correct return value for an unknown ioctl is ENOIOCTL, not
	 * EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl. Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zu bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start), NULL);
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}