xref: /freebsd/sys/dev/md/md.c (revision 4133f23624058951a3b66e3ad735de980a485f36)
/*-
 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */
#define MD_PROVIDERGONE	0x40000		/* Safe to free the softc */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

struct md_req {
	unsigned	md_unit;	/* unit number */
	enum md_types	md_type;	/* type of disk */
	off_t		md_mediasize;	/* size of disk in bytes */
	unsigned	md_sectorsize;	/* sectorsize */
	unsigned	md_options;	/* options */
	int		md_fwheads;	/* firmware heads */
	int		md_fwsectors;	/* firmware sectors */
	char		*md_file;	/* pathname of file to mount */
	enum uio_seg	md_file_seg;	/* location of md_file */
	char		*md_label;	/* label of the device (userspace) */
	int		*md_units;	/* pointer to units array (kernel) */
	size_t		md_units_nitems; /* items in md_units array */
};

#ifdef COMPAT_FREEBSD32
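/*
 * Compatibility shim for 32-bit binaries on a 64-bit kernel: the pointer
 * fields (md_file, md_label) shrink to 32 bits and the structure is packed
 * so its layout matches the 32-bit ABI; the CTASSERT below pins the
 * expected size at 436 bytes.
 */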
struct md_ioctl32 {
	unsigned	md_version;
	unsigned	md_unit;
	enum md_types	md_type;
	uint32_t	md_file;
	off_t		md_mediasize;
	unsigned	md_sectorsize;
	unsigned	md_options;
	uint64_t	md_base;
	int		md_fwheads;
	int		md_fwsectors;
	uint32_t	md_label;
	int		md_pad[MDNPAD];
} __attribute__((__packed__));
CTASSERT((sizeof(struct md_ioctl32)) == 436);

#define	MDIOCATTACH_32	_IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
#define	MDIOCDETACH_32	_IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
#define	MDIOCQUERY_32	_IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32)
#define	MDIOCRESIZE_32	_IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32)
#endif /* COMPAT_FREEBSD32 */

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object file.
 * Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#elif defined(MD_ROOT_MEM)
/* MD region already mapped in memory */
u_char *mfs_root;
int mfs_root_size;
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
static g_provgone_t g_md_providergone;

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
	.providergone = g_md_providergone,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static uma_zone_t md_pbuf_zone;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
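
/*
 * The indir structures form a radix tree over the disk's sectors: each
 * node is one page worth of uintptr_t slots, so the fan-out is NINDIR
 * (e.g. 512 slots, assuming 4 KB pages and 64-bit pointers, which makes
 * nshift = 9).  A leaf slot holds 0 for an unallocated sector, a fill
 * byte value (1..255) for a sector whose bytes are all identical (the
 * MD_COMPRESS case), or a pointer (> 255) to a sector buffer allocated
 * from sc->uma.  Interior slots hold pointers to lower-level indir
 * nodes; "shift" says how far to shift a sector number before masking
 * with NMASK to pick a slot at that level.
 */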

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
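/*
 * A sketch of the arithmetic, assuming NINDIR = 512 (nshift = 9): for a
 * device of 2^21 sectors (1 GB of 512-byte sectors), the loop divides
 * 2^21 down to 4096 and then to 8, so layer = 2 and the top node gets
 * shift = 18.  A lookup then consumes sector-number bits 18..26 at the
 * top level, bits 9..17 one level down, and bits 0..8 at the leaves.
 */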

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

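/*
 * GEOM calls the access method with deltas to the read, write and
 * exclusive counts; the provider's current totals live in pp->acr,
 * pp->acw and pp->ace.  We add the deltas to the totals to decide
 * whether the device is transitioning between opened and closed, and
 * track that in sc->opencount so mddestroy() can refuse to detach a
 * busy device unless MD_FORCE is set.
 */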
static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->queue_mtx);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
		first = 0;
		first_read = 0;
		uc = ptr;
		mp1 = *mp;
		ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, iostart, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
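		/*
		 * For example, with ZERO_REGION_SIZE at 64 KB (a typical
		 * value; it is platform-dependent) and 512-byte sectors,
		 * zerosize stays 64 KB, and a 1 MB BIO_DELETE is emulated
		 * by a single VOP_WRITE with 16 iovecs that all point at
		 * the shared read-only zero_region.
		 */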
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = uma_zalloc(md_pbuf_zone, M_WAITOK);
		bp->bio_resid = len;
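		/*
		 * Unmapped bios carry an array of vm_page_t instead of a
		 * mapped buffer.  Each pass below maps at most MAXPHYS
		 * bytes (commonly 128 KB) of those pages into this pbuf's
		 * KVA, performs the I/O, and loops back to unmapped_step
		 * until the whole request has been transferred.
		 */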
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	iostart = auio.uio_offset;
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, 0, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

	/* When MD_CACHE is set, try to avoid double-caching the data. */
	if (error == 0 && (sc->flags & MD_CACHE) == 0)
		VOP_ADVISE(vp, iostart, auio.uio_offset - 1,
		    POSIX_FADV_DONTNEED);

	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		uma_zfree(md_pbuf_zone, pb);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}

static void
md_swap_page_free(vm_page_t m)
{

	vm_page_xunbusy(m);
	vm_page_free(m);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
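	/*
	 * Worked example, assuming 4 KB pages: bio_offset = 6144 and
	 * bio_length = 10240 give offs = 2048, a first page index of 1,
	 * lastp = 3 and lastend = 4096; the loop then touches pages 1-3,
	 * copying 2048, 4096 and 4096 bytes respectively.
	 */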
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (vm_page_all_valid(m))
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				vm_page_valid(m);
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len == PAGE_SIZE || vm_page_all_valid(m))
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL)
				pmap_zero_page(m);

			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}

			vm_page_valid(m);
			if (m->dirty != VM_PAGE_BITS_ALL) {
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len == PAGE_SIZE || vm_page_all_valid(m))
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				md_swap_page_free(m);
				m = NULL;
			} else {
				/* Page is valid. */
				if (len != PAGE_SIZE) {
					pmap_zero_page_area(m, offs, len);
					if (m->dirty != VM_PAGE_BITS_ALL) {
						vm_page_dirty(m);
						vm_pager_page_unswapped(m);
					}
				} else {
					vm_pager_page_unswapped(m);
					md_swap_page_free(m);
					m = NULL;
				}
			}
		}
		if (m != NULL) {
			vm_page_xunbusy(m);
			vm_page_lock(m);
			if (vm_page_active(m))
				vm_page_reference(m);
			else
				vm_page_activate(m);
			vm_page_unlock(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else if (sc->ident[0] != '\0' &&
			    g_handleattr_str(bp, "GEOM::ident", sc->ident))
				error = -1;
			else if (g_handleattr_int(bp, "MNT::verified", isv))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
			/*
			 * Devstat uses (bio_bcount, bio_resid) for
			 * determining the length of the completed part of
			 * the i/o.  g_io_deliver() will translate from
			 * bio_completed to that, but it also destroys the
			 * bio so we must do our own translation.
			 */
			bp->bio_bcount = bp->bio_length;
			bp->bio_resid = (error == -1 ? bp->bio_bcount : 0);
			devstat_end_transaction_bio(sc->devstat, bp);
		}
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdr->md_options & MD_RESERVE)
		mdr->md_options &= ~MD_COMPRESS;
	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
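	/*
	 * The 0x1ff alignment mask makes every sector buffer 512-byte
	 * aligned, so any pointer the zone hands out is > 255, which
	 * keeps pointers distinguishable from the 0..255 fill-byte
	 * encodings stored in the indir leaves.
	 */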
	if (mdr->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set the credentials in our softc.
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	fname = mdr->md_file;
	if (mdr->md_file_seg == UIO_USERSPACE) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else if (mdr->md_file_seg == UIO_SYSSPACE)
		strlcpy(sc->file, fname, sizeof(sc->file));
	else
		return (EDOOFUS);

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) |
	    ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
	    (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
	sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE |
	    MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static void
g_md_providergone(struct g_provider *pp)
{
	struct md_s *sc = pp->geom->softc;

	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_PROVIDERGONE;
	wakeup(&sc->flags);
	mtx_unlock(&sc->queue_mtx);
}

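/*
 * Tear-down handshake: mddestroy() withers the geom and then sleeps on
 * sc->flags until g_md_providergone() reports, via MD_PROVIDERGONE,
 * that GEOM has dropped its last reference to the provider.  Only then
 * is it safe to free the softc out from under the start routine.
 */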
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();

		mtx_lock(&sc->queue_mtx);
		while (!(sc->flags & MD_PROVIDERGONE))
			msleep(&sc->flags, &sc->queue_mtx, PRIBIO, "mddestroy", 0);
		mtx_unlock(&sc->queue_mtx);
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_req *mdr)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdr->md_mediasize <= 0 ||
		    (mdr->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdr->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdr->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdr->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	if ((mdr->md_options & MD_VERIFY) != 0)
		return (EINVAL);
	npage = mdr->md_mediasize / PAGE_SIZE;
	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
	if (mdr->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
kern_mdattach_locked(struct thread *td, struct md_req *mdr)
{
	struct md_s *sc;
	unsigned sectsize;
	int error, i;

	sx_assert(&md_sx, SA_XLOCKED);

	switch (mdr->md_type) {
	case MD_MALLOC:
	case MD_PRELOAD:
	case MD_VNODE:
	case MD_SWAP:
	case MD_NULL:
		break;
	default:
		return (EINVAL);
	}
	if (mdr->md_sectorsize == 0)
		sectsize = DEV_BSIZE;
	else
		sectsize = mdr->md_sectorsize;
	if (sectsize > MAXPHYS || mdr->md_mediasize < sectsize)
		return (EINVAL);
	if (mdr->md_options & MD_AUTOUNIT)
		sc = mdnew(-1, &error, mdr->md_type);
	else {
		if (mdr->md_unit > INT_MAX)
			return (EINVAL);
		sc = mdnew(mdr->md_unit, &error, mdr->md_type);
	}
	if (sc == NULL)
		return (error);
	if (mdr->md_label != NULL)
		error = copyinstr(mdr->md_label, sc->label,
		    sizeof(sc->label), NULL);
	if (error != 0)
		goto err_after_new;
	if (mdr->md_options & MD_AUTOUNIT)
		mdr->md_unit = sc->unit;
	sc->mediasize = mdr->md_mediasize;
	sc->sectorsize = sectsize;
	error = EDOOFUS;
	switch (sc->type) {
	case MD_MALLOC:
		sc->start = mdstart_malloc;
		error = mdcreate_malloc(sc, mdr);
		break;
	case MD_PRELOAD:
		/*
		 * We disallow attaching preloaded memory disks via
		 * ioctl. Preloaded memory disks are automatically
		 * attached in g_md_init().
		 */
		error = EOPNOTSUPP;
		break;
	case MD_VNODE:
		sc->start = mdstart_vnode;
		error = mdcreate_vnode(sc, mdr, td);
		break;
	case MD_SWAP:
		sc->start = mdstart_swap;
		error = mdcreate_swap(sc, mdr, td);
		break;
	case MD_NULL:
		sc->start = mdstart_null;
		error = mdcreate_null(sc, mdr, td);
		break;
	}
err_after_new:
	if (error != 0) {
		mddestroy(sc, td);
		return (error);
	}

	/* Prune off any residual fractional sector */
	i = sc->mediasize % sc->sectorsize;
	sc->mediasize -= i;

	mdinit(sc);
	return (0);
}

static int
kern_mdattach(struct thread *td, struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mdattach_locked(td, mdr);
	sx_xunlock(&md_sx);
	return (error);
}

static int
kern_mddetach_locked(struct thread *td, struct md_req *mdr)
{
	struct md_s *sc;

	sx_assert(&md_sx, SA_XLOCKED);

	if (mdr->md_mediasize != 0 ||
	    (mdr->md_options & ~MD_FORCE) != 0)
		return (EINVAL);

	sc = mdfind(mdr->md_unit);
	if (sc == NULL)
		return (ENOENT);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
	    !(mdr->md_options & MD_FORCE))
		return (EBUSY);
	return (mddestroy(sc, td));
}

static int
kern_mddetach(struct thread *td, struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mddetach_locked(td, mdr);
	sx_xunlock(&md_sx);
	return (error);
}

static int
kern_mdresize_locked(struct md_req *mdr)
{
	struct md_s *sc;

	sx_assert(&md_sx, SA_XLOCKED);

	if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
		return (EINVAL);

	sc = mdfind(mdr->md_unit);
	if (sc == NULL)
		return (ENOENT);
	if (mdr->md_mediasize < sc->sectorsize)
		return (EINVAL);
	if (mdr->md_mediasize < sc->mediasize &&
	    !(sc->flags & MD_FORCE) &&
	    !(mdr->md_options & MD_FORCE))
		return (EBUSY);
	return (mdresize(sc, mdr));
}

static int
kern_mdresize(struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mdresize_locked(mdr);
	sx_xunlock(&md_sx);
	return (error);
}

static int
kern_mdquery_locked(struct md_req *mdr)
{
	struct md_s *sc;
	int error;

	sx_assert(&md_sx, SA_XLOCKED);

	sc = mdfind(mdr->md_unit);
	if (sc == NULL)
		return (ENOENT);
	mdr->md_type = sc->type;
	mdr->md_options = sc->flags;
	mdr->md_mediasize = sc->mediasize;
	mdr->md_sectorsize = sc->sectorsize;
	error = 0;
	if (mdr->md_label != NULL) {
		error = copyout(sc->label, mdr->md_label,
		    strlen(sc->label) + 1);
		if (error != 0)
			return (error);
	}
	if (sc->type == MD_VNODE ||
	    (sc->type == MD_PRELOAD && mdr->md_file != NULL))
		error = copyout(sc->file, mdr->md_file,
		    strlen(sc->file) + 1);
	return (error);
}

static int
kern_mdquery(struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mdquery_locked(mdr);
	sx_xunlock(&md_sx);
	return (error);
}

/* Copy members that are not userspace pointers. */
#define	MD_IOCTL2REQ(mdio, mdr) do {					\
	(mdr)->md_unit = (mdio)->md_unit;				\
	(mdr)->md_type = (mdio)->md_type;				\
	(mdr)->md_mediasize = (mdio)->md_mediasize;			\
	(mdr)->md_sectorsize = (mdio)->md_sectorsize;			\
	(mdr)->md_options = (mdio)->md_options;				\
	(mdr)->md_fwheads = (mdio)->md_fwheads;				\
	(mdr)->md_fwsectors = (mdio)->md_fwsectors;			\
	(mdr)->md_units = &(mdio)->md_pad[0];				\
	(mdr)->md_units_nitems = nitems((mdio)->md_pad);		\
} while(0)

/* Copy members that might have been updated */
#define MD_REQ2IOCTL(mdr, mdio) do {					\
	(mdio)->md_unit = (mdr)->md_unit;				\
	(mdio)->md_type = (mdr)->md_type;				\
	(mdio)->md_mediasize = (mdr)->md_mediasize;			\
	(mdio)->md_sectorsize = (mdr)->md_sectorsize;			\
	(mdio)->md_options = (mdr)->md_options;				\
	(mdio)->md_fwheads = (mdr)->md_fwheads;				\
	(mdio)->md_fwsectors = (mdr)->md_fwsectors;			\
} while(0)

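/*
 * The ioctl handler runs in three stages: normalize the user-visible
 * md_ioctl (native or 32-bit layout) into a struct md_req, dispatch to
 * the kern_md*() helpers, and finally copy any updated members back
 * into the caller's structure for the commands that return data.
 */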
static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct md_req mdr;
	int error;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	bzero(&mdr, sizeof(mdr));
	switch (cmd) {
	case MDIOCATTACH:
	case MDIOCDETACH:
	case MDIOCRESIZE:
	case MDIOCQUERY: {
		struct md_ioctl *mdio = (struct md_ioctl *)addr;
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		MD_IOCTL2REQ(mdio, &mdr);
		mdr.md_file = mdio->md_file;
		mdr.md_file_seg = UIO_USERSPACE;
		/* If the file is adjacent to the md_ioctl, it is in kernel memory. */
		if ((void *)mdio->md_file == (void *)(mdio + 1))
			mdr.md_file_seg = UIO_SYSSPACE;
		mdr.md_label = mdio->md_label;
		break;
	}
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
	case MDIOCDETACH_32:
	case MDIOCRESIZE_32:
	case MDIOCQUERY_32: {
		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		MD_IOCTL2REQ(mdio, &mdr);
		mdr.md_file = (void *)(uintptr_t)mdio->md_file;
		mdr.md_file_seg = UIO_USERSPACE;
		mdr.md_label = (void *)(uintptr_t)mdio->md_label;
		break;
	}
#endif
	default:
		/* Fall through to handler switch. */
		break;
	}

	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
#endif
		error = kern_mdattach(td, &mdr);
		break;
	case MDIOCDETACH:
#ifdef COMPAT_FREEBSD32
	case MDIOCDETACH_32:
#endif
		error = kern_mddetach(td, &mdr);
		break;
	case MDIOCRESIZE:
#ifdef COMPAT_FREEBSD32
	case MDIOCRESIZE_32:
#endif
		error = kern_mdresize(&mdr);
		break;
	case MDIOCQUERY:
#ifdef COMPAT_FREEBSD32
	case MDIOCQUERY_32:
#endif
		error = kern_mdquery(&mdr);
		break;
	default:
		error = ENOIOCTL;
	}

	switch (cmd) {
	case MDIOCATTACH:
	case MDIOCQUERY: {
		struct md_ioctl *mdio = (struct md_ioctl *)addr;
		MD_REQ2IOCTL(&mdr, mdio);
		break;
	}
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
	case MDIOCQUERY_32: {
		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
		MD_REQ2IOCTL(&mdr, mdio);
		break;
	}
#endif
	default:
		/* Other commands do not alter mdr. */
		break;
	}

	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	if (name != NULL)
		strlcpy(sc->file, name, sizeof(sc->file));
#ifdef MD_ROOT
	if (sc->unit == 0) {
#ifndef ROOTDEVNAME
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
#ifdef MD_ROOT_READONLY
		sc->flags |= MD_READONLY;
#endif
	}
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;
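	/* E.g. NINDIR = 512 leaves nshift = 9 after the loop. */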

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
#ifdef MD_ROOT_MEM
		md_preloaded(mfs_root, mfs_root_size, NULL);
#else
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
#endif
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_pbuf_zone = pbuf_zsecond_create("mdpbuf", nswbuf / 10);
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
				sbuf_printf(sb, " file %s", mp->file);
			sbuf_printf(sb, " label %s", mp->label);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			if (mp->ident[0] != '\0') {
				sbuf_printf(sb, "%s<ident>", indent);
				g_conf_printf_escaped(sb, "%s", mp->ident);
				sbuf_printf(sb, "</ident>\n");
			}
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
				sbuf_printf(sb, "%s<file>", indent);
				g_conf_printf_escaped(sb, "%s", mp->file);
				sbuf_printf(sb, "</file>\n");
			}
			if (mp->type == MD_VNODE)
				sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
				    (mp->flags & MD_CACHE) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<label>", indent);
			g_conf_printf_escaped(sb, "%s", mp->label);
			sbuf_printf(sb, "</label>\n");
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	uma_zdestroy(md_pbuf_zone);
	delete_unrhdr(md_uh);
}