/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define	MD_MODVER 1

#define	MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define	MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

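/*
 * debug.mddebug sysctl: non-zero logs mdctl ioctl calls; values greater
 * than 1 also trace individual sector reads/writes in the malloc backend.
 */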
static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp);

static int	mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

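/*
 * Sector remapping for MD_MALLOC devices is done with a radix tree of
 * page-sized nodes: each struct indir holds one page worth of uintptr_t
 * slots (NINDIR of them), which point either at further indir nodes
 * (interior levels, shift != 0) or at per-sector payloads (leaves).
 */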
#define	NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define	NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

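/*
 * Allocate one radix-tree node.  M_NOWAIT is used (rather than M_WAITOK)
 * presumably because this runs from the I/O path via s_write(), where an
 * allocation failure is reported back as ENOSPC on the bio instead of
 * blocking the worker thread.
 */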
static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
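
/*
 * Worked example (assuming PAGE_SIZE == 4096 and 64-bit pointers, so
 * NINDIR == 512 and nshift == 9): for a device of 1,000,000 sectors the
 * while loop in dimension() runs twice (1000000 -> 1953 -> 3), giving
 * layer == 2 and a top-level shift of 2 * 9 == 18, so each top-level
 * slot spans 2^18 == 262,144 sectors.
 */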

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */
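
/*
 * Descending the tree: at an interior node, (offset >> shift) & NMASK
 * selects the child slot; at a leaf (shift == 0), offset & NMASK selects
 * the sector entry itself.  A NULL child means the sector was never
 * written (or has been pruned), so zero is returned.
 */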

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
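
/*
 * The lip[] stack records the path from the root so that, after a leaf
 * entry is cleared, now-empty nodes can be freed bottom-up (the pruning
 * loop at the end).  Its depth of 10 comfortably exceeds any possible
 * tree height, since every level multiplies capacity by NINDIR.
 */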

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

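/*
 * GEOM access method.  The r/w/e arguments are deltas to be applied to
 * the provider's current read/write/exclusive reference counts, so the
 * sums below are the counts that will be in effect after this call.
 */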
static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL)
		return (ENXIO);
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

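/*
 * MD_MALLOC backend.  Leaf entries in the indir tree are encoded:
 * 0 means an unallocated (all-zero) sector, values 1..255 mean a sector
 * filled entirely with that byte value (the MD_COMPRESS case), and any
 * larger value is a pointer to a UMA-allocated sector buffer.
 */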
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				for (i = 0; i < sc->sectorsize; i++)
					dst[i] = osp;
			else
				bcopy((void *)osp, dst, sc->sectorsize);
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

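/*
 * MD_PRELOAD backend: the disk is just a byte range in a loader-preloaded
 * kernel-memory image, so reads and writes reduce to bcopy().  Note that
 * writes modify the preloaded image in place.
 */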
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	default:
		/* Match the other backends for unsupported requests. */
		return (EOPNOTSUPP);
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

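	/*
	 * BIO_FLUSH maps to VOP_FSYNC(); reads and writes are translated
	 * into a single-iovec uio and handed to VOP_READ()/VOP_WRITE() on
	 * the backing vnode.  vn_start_write() brackets the writes so they
	 * cooperate with filesystem suspension (e.g. snapshots).
	 */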
	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0, td);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0, td);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0, td);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
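	/*
	 * Example (PAGE_SIZE == 4096): for bio_offset 1536 and bio_length
	 * 8192, offs == 1536, lastp == 2 and lastend == 1536, so the loop
	 * touches pages 0..2, starting 1536 bytes into page 0 and stopping
	 * 1536 bytes into page 2.
	 */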
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				break;
			}
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
    m->wire_count, m->busy,
    m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (bp == NULL) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

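		/*
		 * An error of -1 means g_handleattr_int() already completed
		 * the bio, so it must not be delivered a second time here.
		 */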
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc, *sc2;
	int error, max = -1;

	*errp = 0;
	LIST_FOREACH(sc2, &md_softc_list, list) {
		if (unit == sc2->unit) {
			*errp = EBUSY;
			return (NULL);
		}
		if (unit == -1 && sc2->unit > max)
			max = sc2->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
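	/*
	 * The UMA align argument is a mask: 0x1ff requests 512-byte-aligned
	 * items, which also keeps sector pointers well above 255 so they
	 * never collide with the fill-byte encoding in the indir leaves.
	 */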
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	int error, flags, vfslocked;

	error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL);
	if (error != 0)
		return (error);
	flags = FREAD|FWRITE;
	/*
	 * If the user specified that this is a read only device, unset the
	 * FWRITE mask before trying to open the backing store.
	 */
	if ((mdio->md_options & MD_READONLY) != 0)
		flags &= ~FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
		VOP_UNLOCK(nd.ni_vp, 0, td);
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error ? error : EINVAL);
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0, td);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY, td);
		nd.ni_vp->v_vflag &= ~VV_MD;
		VOP_UNLOCK(nd.ni_vp, 0, td);
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, td);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0, td);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow sizes of zero or any size that is not a
	 * multiple of the page size.
	 */
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			vm_object_deallocate(sc->object);
			sc->object = NULL;
			return (EDOM);
		}
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * The version number is checked here, up front, because every
	 * ioctl we currently implement reads an mdio.  If one that does
	 * not is ever added, the check will have to move into the
	 * individual handlers; note also that the correct return value
	 * for an unknown ioctl is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 || mdio->md_options != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
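		/*
		 * Return the unit numbers of all configured devices in
		 * md_pad[]: md_pad[0] holds the count of entries returned,
		 * and if more units exist than fit in the MDNPAD - 1
		 * available slots, the last slot is set to -1 to flag
		 * truncation.
		 */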
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	caddr_t c;
	u_char *ptr;
	char *name, *type;
	size_t len;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
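	/*
	 * Create md(4) devices for any disk images the loader preloaded
	 * (module type "md_image" or "mfs_root").
	 */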
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %zu bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		sx_xlock(&md_sx);
		md_preloaded(ptr, len);
		sx_xunlock(&md_sx);
	}
	status_dev = make_dev(&mdctl_cdevsw, MAXMINOR, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	const char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
}