/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN 0x10000	/* Tell worker thread to terminate. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/* Image gets put here: */
static u_char mfs_root[MD_ROOT_SIZE*1024] = "MFS Filesystem goes here";
static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here";
#endif

static g_init_t md_drvinit;

static int	mdunits;
static struct cdev *status_dev = NULL;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
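
/*
 * Note on the MD_MALLOC backing store (summarizing the code below):
 * sectors live in a radix tree of "indir" pages.  Each leaf slot uses
 * one of three encodings: 0 means the sector has never been written
 * (reads return zeroes), a value in the range 1..255 means the entire
 * sector is filled with that single byte (the MD_COMPRESS case), and
 * any larger value is a pointer to a sector buffer allocated from the
 * per-device UMA zone.  On a typical 64-bit machine with 4K pages,
 * NINDIR is 512, so each layer of the tree resolves 9 bits of the
 * sector number.
 */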

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	unsigned nsect;
	unsigned opencount;
	unsigned secsize;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	unsigned pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
	unsigned npage;
};

static int mddestroy(struct md_s *sc, struct thread *td);

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
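
/*
 * Worked example (illustrative; assumes 4K pages and 8-byte pointers,
 * so NINDIR == 512 and nshift computes to 9): for a device of 1000000
 * sectors the loop above divides 1000000 -> 1953 -> 3, giving
 * layer == 2 and a top-level shift of 18.  Sector s is then resolved
 * as array[(s >> 18) & NMASK] at the top, (s >> 9) & NMASK one level
 * down, and s & NMASK at the leaf.
 */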

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
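
/*
 * Pruning walk-through (illustrative, with NINDIR == 512): assume a
 * two-layer tree whose only allocated sector is 513.  s_write(ip,
 * 513, 0) descends through top-level slot 1 (513 >> 9), clears leaf
 * slot 1 (513 & NMASK) and drops the leaf page's "used" count to
 * zero, so the loop above frees that indir page and clears its slot
 * in the top-level array.
 */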


struct g_class g_md_class = {
	.name = "MD",
	.init = md_drvinit,
};

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL)
		return (ENXIO);
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;

	bp->bio_pblkno = bp->bio_offset / sc->secsize;
	bp->bio_bcount = bp->bio_length;
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);

	wakeup(sc);
}

DECLARE_GEOM_CLASS(g_md_class, g_md);


static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	unsigned secno, nsec, uc;
	uintptr_t sp, osp;

	nsec = bp->bio_bcount / sc->secsize;
	secno = bp->bio_pblkno;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->secsize);
			else if (osp <= 255)
				for (i = 0; i < sc->secsize; i++)
					dst[i] = osp;
			else
				bcopy((void *)osp, dst, sc->secsize);
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->secsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->secsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t) uma_zalloc(
					    sc->uma, M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->secsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->secsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error)
			break;
		secno++;
		dst += sc->secsize;
	}
	bp->bio_resid = 0;
	return (error);
}
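
/*
 * Examples of the resulting encoding when MD_COMPRESS is in effect
 * (illustrative): writing a sector of all zero bytes stores the value
 * 0, which s_write() treats as a prune; a sector of all 0x5a bytes is
 * stored as the bare value 0x5a in the leaf slot; mixed data gets a
 * buffer from sc->uma and the leaf stores its address, which is why
 * the "> 255" test can tell real pointers from fill bytes.
 */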

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	if (bp->bio_cmd == BIO_DELETE) {
		/* Nothing to do: deletes are a no-op for preloaded images. */
	} else if (bp->bio_cmd == BIO_READ) {
		bcopy(sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
		    bp->bio_data, bp->bio_bcount);
	} else {
		bcopy(bp->bio_data,
		    sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
		    bp->bio_bcount);
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write, anyway) the buffer is
	 * still valid.
	 */

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_bcount;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_pblkno * sc->secsize;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_bcount;
	auio.uio_td = curthread;
	/*
	 * When reading, set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing, IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
	} else {
		(void) vn_start_write(sc->vnode, &mp, V_WAIT);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_WRITE(sc->vnode, &auio,
		    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		vn_finished_write(mp);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int i, rv;
	int offs, len, lastp, lastend;
	vm_page_t m;
	u_char *p;

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
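	/*
	 * Worked example (illustrative, 4K pages): bio_offset 5120
	 * with bio_length 9216 gives offs 1024, first page 1,
	 * lastp 3 and lastend 2048, so the loop below copies 3072,
	 * 4096 and 2048 bytes on the three pages respectively.
	 */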

	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sf = sf_buf_alloc(m, 0);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		sf_buf_free(sf);
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
    m->wire_count, m->busy,
    m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error, hasgiant;

	sc = arg;
	curthread->td_base_pri = PRIBIO;

	switch (sc->type) {
	case MD_VNODE:
		mtx_lock(&Giant);
		hasgiant = 1;
		break;
	case MD_MALLOC:
	case MD_PRELOAD:
	case MD_SWAP:
	default:
		hasgiant = 0;
		break;
	}

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		bp = bioq_first(&sc->bio_queue);
		if (bp)
			bioq_remove(&sc->bio_queue, bp);
		if (!bp) {
			if (sc->flags & MD_SHUTDOWN) {
				mtx_unlock(&sc->queue_mtx);
				sc->procp = NULL;
				wakeup(&sc->procp);
				if (hasgiant)
					mtx_unlock(&Giant);
				kthread_exit(0);
			}
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			switch (sc->type) {
			case MD_MALLOC:
				error = mdstart_malloc(sc, bp);
				break;
			case MD_PRELOAD:
				error = mdstart_preload(sc, bp);
				break;
			case MD_VNODE:
				error = mdstart_vnode(sc, bp);
				break;
			case MD_SWAP:
				error = mdstart_swap(sc, bp);
				break;
			default:
				panic("Impossible md(type)");
				break;
			}
		}

		/* An "error" of -1 means g_handleattr_int() delivered the bio. */
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

static struct md_s *
mdnew(int unit)
{
	struct md_s *sc;
	int error, max = -1;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit) {
			/* XXX: UNLOCK(unique unit numbers) */
			return (NULL);
		}
		if (sc->unit > max)
			max = sc->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->unit = unit;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	error = kthread_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error) {
		free(sc, M_MD);
		return (NULL);
	}
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

static void
mdinit(struct md_s *sc)
{

	struct g_geom *gp;
	struct g_provider *pp;

	DROP_GIANT();
	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->start = g_md_start;
	gp->access = g_md_access;
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = (off_t)sc->nsect * sc->secsize;
	pp->sectorsize = sc->secsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	PICKUP_GIANT();
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_ioctl *mdio)
{
	struct md_s *sc;

	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT))
		return (EINVAL);
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = mdio->md_size;
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (mdio->md_size << DEV_BSHIFT);
	mdinit(sc);
	return (0);
}


static int
mdcreate_malloc(struct md_ioctl *mdio)
{
	struct md_s *sc;
	off_t u;
	uintptr_t sp;
	int error;

	error = 0;
	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_secsize != 0 && !powerof2(mdio->md_secsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_MALLOC;
	if (mdio->md_secsize != 0)
		sc->secsize = mdio->md_secsize;
	else
		sc->secsize = DEV_BSIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->nsect = (mdio->md_size * DEV_BSIZE) / sc->secsize;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->nsect);
	sc->uma = uma_zcreate(sc->name, sc->secsize,
	    NULL, NULL, NULL, NULL, 0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		for (u = 0; u < sc->nsect; u++) {
			sp = (uintptr_t) uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error)
				break;
		}
	}
	if (error) {
		mddestroy(sc, NULL);
		return (error);
	}
	mdinit(sc);
	if (!(mdio->md_options & MD_RESERVE))
		sc->pp->flags |= G_PF_CANDELETE;
	return (0);
}


static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->secsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->secsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_ioctl *mdio, struct thread *td)
{
	struct md_s *sc;
	struct vattr vattr;
	struct nameidata nd;
	int error, flags;

	flags = FREAD|FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
	error = vn_open(&nd, &flags, 0, -1);
	if (error) {
		if (error != EACCES && error != EPERM && error != EROFS)
			return (error);
		flags &= ~FWRITE;
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
		error = vn_open(&nd, &flags, 0, -1);
		if (error)
			return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
		VOP_UNLOCK(nd.ni_vp, 0, td);
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (error ? error : EINVAL);
	}
	VOP_UNLOCK(nd.ni_vp, 0, td);

	/* Check for NULL before touching sc->unit. */
	if (mdio->md_options & MD_AUTOUNIT)
		sc = mdnew(-1);
	else
		sc = mdnew(mdio->md_unit);
	if (sc == NULL) {
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (EBUSY);
	}
	if (mdio->md_options & MD_AUTOUNIT)
		mdio->md_unit = sc->unit;

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->type = MD_VNODE;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->secsize = DEV_BSIZE;
	sc->vnode = nd.ni_vp;

	/*
	 * If the size is specified, override the file attributes.
	 */
	if (mdio->md_size)
		sc->nsect = mdio->md_size;
	else
		sc->nsect = vattr.va_size / sc->secsize; /* XXX: round up ? */
	if (sc->nsect == 0) {
		mddestroy(sc, td);
		return (EINVAL);
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		mddestroy(sc, td);
		return (error);
	}
	mdinit(sc);
	return (0);
}

static void
md_zapit(void *p, int cancel)
{
	if (cancel)
		return;
	g_wither_geom(p, ENXIO);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	GIANT_REQUIRED;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_waitfor_event(md_zapit, sc->gp, M_WAITOK, sc->gp, NULL);
		sc->gp = NULL;
		sc->pp = NULL;
	}
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (sc->procp != NULL)
		tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10);
	/* Only destroy the queue mutex once the worker thread is gone. */
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL)
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL) {
		vm_object_deallocate(sc->object);
	}
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	/* XXX: LOCK(unique unit numbers) */
	LIST_REMOVE(sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_ioctl *mdio, struct thread *td)
{
	int error;
	struct md_s *sc;

	GIANT_REQUIRED;

	/* Check for NULL before touching sc->unit. */
	if (mdio->md_options & MD_AUTOUNIT)
		sc = mdnew(-1);
	else
		sc = mdnew(mdio->md_unit);
	if (sc == NULL)
		return (EBUSY);
	if (mdio->md_options & MD_AUTOUNIT)
		mdio->md_unit = sc->unit;

	sc->type = MD_SWAP;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */

	if (mdio->md_size == 0) {
		mddestroy(sc, td);
		return (EDOM);
	}

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * sc_nsect is in units of DEV_BSIZE.
	 * sc_npage is in units of PAGE_SIZE.
	 *
	 * Note the truncation.
	 */

	sc->secsize = DEV_BSIZE;
	sc->npage = mdio->md_size / (PAGE_SIZE / DEV_BSIZE);
	sc->nsect = sc->npage * (PAGE_SIZE / DEV_BSIZE);
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE *
	    (vm_offset_t)sc->npage, VM_PROT_DEFAULT, 0);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, sc->npage) < 0) {
			vm_object_deallocate(sc->object);
			sc->object = NULL;
			mddestroy(sc, td);
			return (EDOM);
		}
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		mddestroy(sc, td);
		return (error);
	}
	mdinit(sc);
	if (!(mdio->md_options & MD_RESERVE))
		sc->pp->flags |= G_PF_CANDELETE;
	return (0);
}

static int
mddetach(int unit, struct thread *td)
{
	struct md_s *sc;

	sc = mdfind(unit);
	if (sc == NULL)
		return (ENOENT);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
		return (EBUSY);
	switch (sc->type) {
	case MD_VNODE:
	case MD_SWAP:
	case MD_MALLOC:
	case MD_PRELOAD:
		return (mddestroy(sc, td));
	default:
		return (EOPNOTSUPP);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	mdio = (struct md_ioctl *)addr;
	switch (cmd) {
	case MDIOCATTACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		switch (mdio->md_type) {
		case MD_MALLOC:
			return (mdcreate_malloc(mdio));
		case MD_PRELOAD:
			return (mdcreate_preload(mdio));
		case MD_VNODE:
			return (mdcreate_vnode(mdio, td));
		case MD_SWAP:
			return (mdcreate_swap(mdio, td));
		default:
			return (EINVAL);
		}
	case MDIOCDETACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		if (mdio->md_file != NULL || mdio->md_size != 0 ||
		    mdio->md_options != 0)
			return (EINVAL);
		return (mddetach(mdio->md_unit, td));
	case MDIOCQUERY:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		switch (sc->type) {
		case MD_MALLOC:
			mdio->md_size = sc->nsect;
			break;
		case MD_PRELOAD:
			mdio->md_size = sc->nsect;
			mdio->md_base = (uint64_t)(intptr_t)sc->pl_ptr;
			break;
		case MD_SWAP:
			mdio->md_size = sc->nsect;
			break;
		case MD_VNODE:
			mdio->md_size = sc->nsect;
			/* XXX fill this in */
			mdio->md_file = NULL;
			break;
		}
		return (0);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
	return (ENOIOCTL);
}
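
/*
 * Illustrative userland usage of the control device (a sketch of what
 * mdconfig(8) does; not part of this driver): open /dev/mdctl and
 * issue MDIOCATTACH with a filled-in struct md_ioctl, e.g.
 *
 *	struct md_ioctl mdio;
 *
 *	bzero(&mdio, sizeof(mdio));
 *	mdio.md_version = MDIOVERSION;
 *	mdio.md_type = MD_SWAP;
 *	mdio.md_options = MD_AUTOUNIT;
 *	mdio.md_size = 65536;		(32MB in 512-byte sectors)
 *	if (ioctl(fd, MDIOCATTACH, &mdio) == 0)
 *		printf("attached md%d\n", mdio.md_unit);
 */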

static void
md_preloaded(u_char *image, unsigned length)
{
	struct md_s *sc;

	sc = mdnew(-1);
	if (sc == NULL)
		return;
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = length / DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
md_drvinit(struct g_class *mp __unused)
{

	caddr_t mod;
	caddr_t c;
	u_char *ptr, *name, *type;
	unsigned len;

	mod = NULL;
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	md_preloaded(mfs_root, MD_ROOT_SIZE*1024);
#endif
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (name == NULL)
			continue;
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		md_preloaded(ptr, len);
	}
	status_dev = make_dev(&mdctl_cdevsw, 0xffff00ff, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}
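
/*
 * Preloaded images normally arrive via the boot loader.  A typical
 * loader.conf stanza (illustrative; the "rootfs" prefix is an
 * arbitrary name) looks like:
 *
 *	rootfs_load="YES"
 *	rootfs_type="md_image"
 *	rootfs_name="/boot/rootfs.img"
 *
 * which makes the module show up above with MODINFO_TYPE "md_image".
 */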

static int
md_modevent(module_t mod, int type, void *data)
{
	int error;
	struct md_s *sc;

	switch (type) {
	case MOD_LOAD:
		break;
	case MOD_UNLOAD:
		/* mddetach() unlinks and frees sc, so don't walk the list. */
		while ((sc = LIST_FIRST(&md_softc_list)) != NULL) {
			error = mddetach(sc->unit, curthread);
			if (error != 0)
				return (error);
		}
		if (status_dev)
			destroy_dev(status_dev);
		status_dev = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t md_mod = {
	MD_NAME,
	md_modevent,
	NULL
};
DECLARE_MODULE(md, md_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_VERSION(md, MD_MODVER);