xref: /freebsd/sys/dev/md/md.c (revision 5945da0bc9ac42f531b1079a246eb8ce4f0d63db)
1 /*-
2  * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
3  *
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  */
12 
13 /*-
14  * The following functions are based on the historical vn(4) driver:
15  * mdstart_swap(), mdstart_vnode(), mdcreate_swap(), mdcreate_vnode()
16  * and mddestroy(), and as such under the following copyright:
17  *
18  * Copyright (c) 1988 University of Utah.
19  * Copyright (c) 1990, 1993
20  *	The Regents of the University of California.  All rights reserved.
21  * Copyright (c) 2013 The FreeBSD Foundation
22  * All rights reserved.
23  *
24  * This code is derived from software contributed to Berkeley by
25  * the Systems Programming Group of the University of Utah Computer
26  * Science Department.
27  *
28  * Portions of this software were developed by Konstantin Belousov
29  * under sponsorship from the FreeBSD Foundation.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  * 3. Neither the name of the University nor the names of its contributors
40  *    may be used to endorse or promote products derived from this software
41  *    without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  *
55  * from: Utah Hdr: vn.c 1.13 94/04/02
56  * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
57  */
58 
59 #include "opt_rootdevname.h"
60 #include "opt_geom.h"
61 #include "opt_md.h"
62 
63 #include <sys/systm.h>
64 #include <sys/bio.h>
65 #include <sys/buf.h>
66 #include <sys/bus.h>
67 #include <sys/conf.h>
68 #include <sys/devicestat.h>
69 #include <sys/disk.h>
70 #include <sys/fcntl.h>
71 #include <sys/kernel.h>
72 #include <sys/kthread.h>
73 #include <sys/limits.h>
74 #include <sys/linker.h>
75 #include <sys/lock.h>
76 #include <sys/malloc.h>
77 #include <sys/mdioctl.h>
78 #include <sys/mount.h>
79 #include <sys/mutex.h>
80 #include <sys/namei.h>
81 #include <sys/proc.h>
82 #include <sys/queue.h>
83 #include <sys/rwlock.h>
84 #include <sys/sx.h>
85 #include <sys/sbuf.h>
86 #include <sys/sched.h>
87 #include <sys/sf_buf.h>
88 #include <sys/sysctl.h>
89 #include <sys/uio.h>
90 #include <sys/unistd.h>
91 #include <sys/vnode.h>
92 
93 #include <geom/geom.h>
94 #include <geom/geom_int.h>
95 
96 #include <vm/vm.h>
97 #include <vm/vm_extern.h>
98 #include <vm/vm_param.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pager.h>
102 #include <vm/swap_pager.h>
103 #include <vm/uma.h>
104 
105 #include <machine/bus.h>
106 
107 #define MD_MODVER 1
108 
109 #define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
110 #define	MD_EXITING	0x20000		/* Worker thread is exiting. */
111 #define MD_PROVIDERGONE	0x40000		/* Safe to free the softc */
112 
113 #ifndef MD_NSECT
114 #define MD_NSECT (10000 * 2)
115 #endif
116 
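/*
 * In-kernel representation of an attach/detach/resize/query request,
 * decoded from the various md(4) ioctls.  For example, "mdconfig -a
 * -t swap -s 1g" issues an MDIOCATTACH that is decoded into an md_req
 * with md_type == MD_SWAP.
 */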
117 struct md_req {
118 	unsigned	md_unit;	/* unit number */
119 	enum md_types	md_type;	/* type of disk */
120 	off_t		md_mediasize;	/* size of disk in bytes */
121 	unsigned	md_sectorsize;	/* sectorsize */
122 	unsigned	md_options;	/* options */
123 	int		md_fwheads;	/* firmware heads */
124 	int		md_fwsectors;	/* firmware sectors */
125 	char		*md_file;	/* pathname of file to mount */
126 	enum uio_seg	md_file_seg;	/* location of md_file */
127 	char		*md_label;	/* label of the device (userspace) */
128 	int		*md_units;	/* pointer to units array (kernel) */
129 	size_t		md_units_nitems; /* items in md_units array */
130 };
131 
132 #ifdef COMPAT_FREEBSD32
133 struct md_ioctl32 {
134 	unsigned	md_version;
135 	unsigned	md_unit;
136 	enum md_types	md_type;
137 	uint32_t	md_file;
138 	off_t		md_mediasize;
139 	unsigned	md_sectorsize;
140 	unsigned	md_options;
141 	uint64_t	md_base;
142 	int		md_fwheads;
143 	int		md_fwsectors;
144 	uint32_t	md_label;
145 	int		md_pad[MDNPAD];
146 }
147 #ifdef __amd64__
148 __attribute__((__packed__))
149 #endif
150 ;
151 #ifndef __amd64__
152 CTASSERT((sizeof(struct md_ioctl32)) == 440);
153 #else
154 CTASSERT((sizeof(struct md_ioctl32)) == 436);
155 #endif
156 
157 #define	MDIOCATTACH_32	_IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
158 #define	MDIOCDETACH_32	_IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
159 #define	MDIOCQUERY_32	_IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32)
160 #define	MDIOCRESIZE_32	_IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32)
161 #endif /* COMPAT_FREEBSD32 */
162 
163 static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
164 static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");
165 
166 static int md_debug;
167 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
168     "Enable md(4) debug messages");
169 static int md_malloc_wait;
170 SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
171     "Allow malloc to wait for memory allocations");
172 
173 #if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
174 #define	MD_ROOT_FSTYPE	"ufs"
175 #endif
176 
177 #if defined(MD_ROOT)
178 /*
179  * Preloaded image gets put here.
180  */
181 #if defined(MD_ROOT_SIZE)
182 /*
183  * We put the mfs_root symbol into the oldmfs section of the kernel object file.
 * Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
186  */
187 u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
188 const int mfs_root_size = sizeof(mfs_root);
189 #elif defined(MD_ROOT_MEM)
/* MD region already mapped into memory */
191 u_char *mfs_root;
192 int mfs_root_size;
193 #else
194 extern volatile u_char __weak_symbol mfs_root;
195 extern volatile u_char __weak_symbol mfs_root_end;
196 #define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
197 #endif
198 #endif
199 
200 static g_init_t g_md_init;
201 static g_fini_t g_md_fini;
202 static g_start_t g_md_start;
203 static g_access_t g_md_access;
204 static void g_md_dumpconf(struct sbuf *sb, const char *indent,
205     struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
206 static g_provgone_t g_md_providergone;
207 
208 static struct cdev *status_dev = NULL;
209 static struct sx md_sx;
210 static struct unrhdr *md_uh;
211 
212 static d_ioctl_t mdctlioctl;
213 
214 static struct cdevsw mdctl_cdevsw = {
215 	.d_version =	D_VERSION,
216 	.d_ioctl =	mdctlioctl,
217 	.d_name =	MD_NAME,
218 };
219 
220 struct g_class g_md_class = {
221 	.name = "MD",
222 	.version = G_VERSION,
223 	.init = g_md_init,
224 	.fini = g_md_fini,
225 	.start = g_md_start,
226 	.access = g_md_access,
227 	.dumpconf = g_md_dumpconf,
228 	.providergone = g_md_providergone,
229 };
230 
231 DECLARE_GEOM_CLASS(g_md_class, g_md);
232 MODULE_VERSION(geom_md, 0);
233 
234 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);
235 
236 #define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
237 #define NMASK	(NINDIR-1)
238 static int nshift;
239 
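/*
 * The MD_MALLOC backing store is a radix tree of "indir" nodes, each
 * holding NINDIR entries.  Interior entries point to the next level
 * down, while leaf entries hold per-sector values (see s_read() and
 * s_write() below).  nshift is log2(NINDIR).
 */
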
240 struct indir {
241 	uintptr_t	*array;
242 	u_int		total;
243 	u_int		used;
244 	u_int		shift;
245 };
246 
247 struct md_s {
248 	int unit;
249 	LIST_ENTRY(md_s) list;
250 	struct bio_queue_head bio_queue;
251 	struct mtx queue_mtx;
252 	struct cdev *dev;
253 	enum md_types type;
254 	off_t mediasize;
255 	unsigned sectorsize;
256 	unsigned opencount;
257 	unsigned fwheads;
258 	unsigned fwsectors;
259 	char ident[DISK_IDENT_SIZE];
260 	unsigned flags;
261 	char name[20];
262 	struct proc *procp;
263 	struct g_geom *gp;
264 	struct g_provider *pp;
265 	int (*start)(struct md_s *sc, struct bio *bp);
266 	struct devstat *devstat;
267 	struct ucred *cred;
268 	char label[PATH_MAX];
269 	bool candelete;
270 
271 	union {
272 		/* MD_MALLOC related fields */
273 		struct {
274 			struct indir *indir;
275 			uma_zone_t uma;
276 		} s_malloc;
277 
278 		/* MD_PRELOAD related fields */
279 		struct {
280 			u_char *pl_ptr;
281 			size_t pl_len;
282 			char name[PATH_MAX];
283 		} s_preload;
284 
285 		/* MD_VNODE related fields */
286 		struct {
287 			struct vnode *vnode;
288 			char file[PATH_MAX];
289 			char *kva;
290 		} s_vnode;
291 
292 		/* MD_SWAP related fields */
293 		struct {
294 			vm_object_t object;
295 		} s_swap;
296 
297 		/* MD_NULL */
298 		struct {
299 		} s_null;
300 	};
301 };
302 
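/*
 * Allocate and zero a new radix-tree node for the given shift (depth).
 * May fail and return NULL when md_malloc_wait is unset.
 */
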
303 static struct indir *
304 new_indir(u_int shift)
305 {
306 	struct indir *ip;
307 
308 	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
309 	    | M_ZERO);
310 	if (ip == NULL)
311 		return (NULL);
312 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
313 	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
314 	if (ip->array == NULL) {
315 		free(ip, M_MD);
316 		return (NULL);
317 	}
318 	ip->total = NINDIR;
319 	ip->shift = shift;
320 	return (ip);
321 }
322 
323 static void
324 del_indir(struct indir *ip)
325 {
326 
327 	free(ip->array, M_MDSECT);
328 	free(ip, M_MD);
329 }
330 
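/*
 * Recursively free a radix (sub)tree.  Leaf entries greater than 255
 * are pointers to UMA-allocated sector data and must be freed; smaller
 * values encode a hole or a single fill byte and carry no allocation.
 */
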
331 static void
332 destroy_indir(struct md_s *sc, struct indir *ip)
333 {
334 	int i;
335 
336 	for (i = 0; i < NINDIR; i++) {
337 		if (!ip->array[i])
338 			continue;
339 		if (ip->shift)
340 			destroy_indir(sc, (struct indir*)(ip->array[i]));
341 		else if (ip->array[i] > 255)
342 			uma_zfree(sc->s_malloc.uma, (void *)(ip->array[i]));
343 	}
344 	del_indir(ip);
345 }
346 
347 /*
348  * This function does the math and allocates the top level "indir" structure
349  * for a device of "size" sectors.
350  */
351 
352 static struct indir *
353 dimension(off_t size)
354 {
355 	off_t rcnt;
356 	struct indir *ip;
357 	int layer;
358 
359 	rcnt = size;
360 	layer = 0;
361 	while (rcnt > NINDIR) {
362 		rcnt /= NINDIR;
363 		layer++;
364 	}
365 
366 	/*
367 	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array here.
369 	 */
370 	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
371 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
372 	    M_MDSECT, M_WAITOK | M_ZERO);
373 	ip->total = NINDIR;
374 	ip->shift = layer * nshift;
375 	return (ip);
376 }
377 
/*
 * Read a given sector.  Returns 0 for an unallocated sector, a value
 * <= 255 for a sector filled entirely with that byte, or a pointer to
 * the sector's allocated data otherwise.
 */
381 
382 static uintptr_t
383 s_read(struct indir *ip, off_t offset)
384 {
385 	struct indir *cip;
386 	int idx;
387 	uintptr_t up;
388 
389 	if (md_debug > 1)
390 		printf("s_read(%jd)\n", (intmax_t)offset);
391 	up = 0;
392 	for (cip = ip; cip != NULL;) {
393 		if (cip->shift) {
394 			idx = (offset >> cip->shift) & NMASK;
395 			up = cip->array[idx];
396 			cip = (struct indir *)up;
397 			continue;
398 		}
399 		idx = offset & NMASK;
400 		return (cip->array[idx]);
401 	}
402 	return (0);
403 }
404 
/*
 * Write a given sector; prune the tree if the value is 0, freeing any
 * indirection nodes that become empty as a result.
 */
408 
409 static int
410 s_write(struct indir *ip, off_t offset, uintptr_t ptr)
411 {
412 	struct indir *cip, *lip[10];
413 	int idx, li;
414 	uintptr_t up;
415 
416 	if (md_debug > 1)
417 		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
418 	up = 0;
419 	li = 0;
420 	cip = ip;
421 	for (;;) {
422 		lip[li++] = cip;
423 		if (cip->shift) {
424 			idx = (offset >> cip->shift) & NMASK;
425 			up = cip->array[idx];
426 			if (up != 0) {
427 				cip = (struct indir *)up;
428 				continue;
429 			}
430 			/* Allocate branch */
431 			cip->array[idx] =
432 			    (uintptr_t)new_indir(cip->shift - nshift);
433 			if (cip->array[idx] == 0)
434 				return (ENOSPC);
435 			cip->used++;
436 			up = cip->array[idx];
437 			cip = (struct indir *)up;
438 			continue;
439 		}
440 		/* leafnode */
441 		idx = offset & NMASK;
442 		up = cip->array[idx];
443 		if (up != 0)
444 			cip->used--;
445 		cip->array[idx] = ptr;
446 		if (ptr != 0)
447 			cip->used++;
448 		break;
449 	}
450 	if (cip->used != 0 || li == 1)
451 		return (0);
452 	li--;
453 	while (cip->used == 0 && cip != ip) {
454 		li--;
455 		idx = (offset >> lip[li]->shift) & NMASK;
456 		up = lip[li]->array[idx];
457 		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
458 		del_indir(cip);
459 		lip[li]->array[idx] = 0;
460 		lip[li]->used--;
461 		cip = lip[li];
462 	}
463 	return (0);
464 }
465 
466 static int
467 g_md_access(struct g_provider *pp, int r, int w, int e)
468 {
469 	struct md_s *sc;
470 
471 	sc = pp->geom->softc;
472 	if (sc == NULL) {
473 		if (r <= 0 && w <= 0 && e <= 0)
474 			return (0);
475 		return (ENXIO);
476 	}
477 	r += pp->acr;
478 	w += pp->acw;
479 	e += pp->ace;
480 	if ((sc->flags & MD_READONLY) != 0 && w > 0)
481 		return (EROFS);
482 	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
483 		sc->opencount = 1;
484 	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
485 		sc->opencount = 0;
486 	}
487 	return (0);
488 }
489 
490 static void
491 g_md_start(struct bio *bp)
492 {
493 	struct md_s *sc;
494 
495 	sc = bp->bio_to->geom->softc;
496 	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
497 		devstat_start_transaction_bio(sc->devstat, bp);
498 	}
499 	mtx_lock(&sc->queue_mtx);
500 	bioq_disksort(&sc->bio_queue, bp);
501 	wakeup(sc);
502 	mtx_unlock(&sc->queue_mtx);
503 }
504 
505 #define	MD_MALLOC_MOVE_ZERO	1
506 #define	MD_MALLOC_MOVE_FILL	2
507 #define	MD_MALLOC_MOVE_READ	3
508 #define	MD_MALLOC_MOVE_WRITE	4
509 #define	MD_MALLOC_MOVE_CMP	5
510 
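/*
 * Transfer data between a sector of an MD_MALLOC device and the pages
 * of an unmapped bio, one sf_buf mapping at a time.  The op argument
 * selects zeroing, filling, copying in either direction, or checking
 * whether the sector consists of a single repeated byte.
 */
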
511 static int
512 md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
513     void *ptr, u_char fill, int op)
514 {
515 	struct sf_buf *sf;
516 	vm_page_t m, *mp1;
517 	char *p, first;
518 	off_t *uc;
519 	unsigned n;
520 	int error, i, ma_offs1, sz, first_read;
521 
522 	m = NULL;
523 	error = 0;
524 	sf = NULL;
	/*
	 * These values are used only by the MD_MALLOC_MOVE_CMP case,
	 * but gcc cannot see that, so initialize them unconditionally
	 * to quiet its "may be used uninitialized" warning.
	 */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
532 	sched_pin();
533 	for (n = sectorsize; n != 0; n -= sz) {
534 		sz = imin(PAGE_SIZE - *ma_offs, n);
535 		if (m != **mp) {
536 			if (sf != NULL)
537 				sf_buf_free(sf);
538 			m = **mp;
539 			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
540 			    (md_malloc_wait ? 0 : SFB_NOWAIT));
541 			if (sf == NULL) {
542 				error = ENOMEM;
543 				break;
544 			}
545 		}
546 		p = (char *)sf_buf_kva(sf) + *ma_offs;
547 		switch (op) {
548 		case MD_MALLOC_MOVE_ZERO:
549 			bzero(p, sz);
550 			break;
551 		case MD_MALLOC_MOVE_FILL:
552 			memset(p, fill, sz);
553 			break;
554 		case MD_MALLOC_MOVE_READ:
555 			bcopy(ptr, p, sz);
556 			cpu_flush_dcache(p, sz);
557 			break;
558 		case MD_MALLOC_MOVE_WRITE:
559 			bcopy(p, ptr, sz);
560 			break;
561 		case MD_MALLOC_MOVE_CMP:
562 			for (i = 0; i < sz; i++, p++) {
563 				if (!first_read) {
564 					*uc = (u_char)*p;
565 					first = *p;
566 					first_read = 1;
567 				} else if (*p != first) {
568 					error = EDOOFUS;
569 					break;
570 				}
571 			}
572 			break;
573 		default:
574 			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
575 			break;
576 		}
577 		if (error != 0)
578 			break;
579 		*ma_offs += sz;
580 		*ma_offs %= PAGE_SIZE;
581 		if (*ma_offs == 0)
582 			(*mp)++;
583 		ptr = (char *)ptr + sz;
584 	}
585 
586 	if (sf != NULL)
587 		sf_buf_free(sf);
588 	sched_unpin();
589 	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
590 		*mp = mp1;
591 		*ma_offs = ma_offs1;
592 	}
593 	return (error);
594 }
595 
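/*
 * Same operations as md_malloc_move_ma(), but for a BIO_VLIST bio
 * whose data is described by a list of kernel virtual address segments.
 */
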
596 static int
597 md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
598     unsigned len, void *ptr, u_char fill, int op)
599 {
600 	bus_dma_segment_t *vlist;
601 	uint8_t *p, *end, first;
602 	off_t *uc;
603 	int ma_offs, seg_len;
604 
605 	vlist = *pvlist;
606 	ma_offs = *pma_offs;
607 	uc = ptr;
608 
609 	for (; len != 0; len -= seg_len) {
610 		seg_len = imin(vlist->ds_len - ma_offs, len);
611 		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
612 		switch (op) {
613 		case MD_MALLOC_MOVE_ZERO:
614 			bzero(p, seg_len);
615 			break;
616 		case MD_MALLOC_MOVE_FILL:
617 			memset(p, fill, seg_len);
618 			break;
619 		case MD_MALLOC_MOVE_READ:
620 			bcopy(ptr, p, seg_len);
621 			cpu_flush_dcache(p, seg_len);
622 			break;
623 		case MD_MALLOC_MOVE_WRITE:
624 			bcopy(p, ptr, seg_len);
625 			break;
626 		case MD_MALLOC_MOVE_CMP:
627 			end = p + seg_len;
628 			first = *uc = *p;
629 			/* Confirm all following bytes match the first */
630 			while (++p < end) {
631 				if (*p != first)
632 					return (EDOOFUS);
633 			}
634 			break;
635 		default:
636 			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
637 			break;
638 		}
639 
640 		ma_offs += seg_len;
641 		if (ma_offs == vlist->ds_len) {
642 			ma_offs = 0;
643 			vlist++;
644 		}
645 		ptr = (uint8_t *)ptr + seg_len;
646 	}
647 	*pvlist = vlist;
648 	*pma_offs = ma_offs;
649 
650 	return (0);
651 }
652 
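/*
 * I/O handler for MD_MALLOC devices.  When MD_COMPRESS is enabled, a
 * sector whose bytes are all identical is stored as the fill byte
 * itself (a leaf value <= 255) instead of an allocated sector.
 */
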
653 static int
654 mdstart_malloc(struct md_s *sc, struct bio *bp)
655 {
656 	u_char *dst;
657 	vm_page_t *m;
658 	bus_dma_segment_t *vlist;
659 	int i, error, error1, ma_offs, notmapped;
660 	off_t secno, nsec, uc;
661 	uintptr_t sp, osp;
662 
663 	switch (bp->bio_cmd) {
664 	case BIO_READ:
665 	case BIO_WRITE:
666 	case BIO_DELETE:
667 		break;
668 	case BIO_FLUSH:
669 		return (0);
670 	default:
671 		return (EOPNOTSUPP);
672 	}
673 
674 	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
675 	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
676 	    (bus_dma_segment_t *)bp->bio_data : NULL;
677 	if (notmapped) {
678 		m = bp->bio_ma;
679 		ma_offs = bp->bio_ma_offset;
680 		dst = NULL;
681 		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
682 	} else if (vlist != NULL) {
683 		ma_offs = bp->bio_ma_offset;
684 		dst = NULL;
685 	} else {
686 		dst = bp->bio_data;
687 	}
688 
689 	nsec = bp->bio_length / sc->sectorsize;
690 	secno = bp->bio_offset / sc->sectorsize;
691 	error = 0;
692 	while (nsec--) {
693 		osp = s_read(sc->s_malloc.indir, secno);
694 		if (bp->bio_cmd == BIO_DELETE) {
695 			if (osp != 0)
696 				error = s_write(sc->s_malloc.indir, secno, 0);
697 		} else if (bp->bio_cmd == BIO_READ) {
698 			if (osp == 0) {
699 				if (notmapped) {
700 					error = md_malloc_move_ma(&m, &ma_offs,
701 					    sc->sectorsize, NULL, 0,
702 					    MD_MALLOC_MOVE_ZERO);
703 				} else if (vlist != NULL) {
704 					error = md_malloc_move_vlist(&vlist,
705 					    &ma_offs, sc->sectorsize, NULL, 0,
706 					    MD_MALLOC_MOVE_ZERO);
707 				} else
708 					bzero(dst, sc->sectorsize);
709 			} else if (osp <= 255) {
710 				if (notmapped) {
711 					error = md_malloc_move_ma(&m, &ma_offs,
712 					    sc->sectorsize, NULL, osp,
713 					    MD_MALLOC_MOVE_FILL);
714 				} else if (vlist != NULL) {
715 					error = md_malloc_move_vlist(&vlist,
716 					    &ma_offs, sc->sectorsize, NULL, osp,
717 					    MD_MALLOC_MOVE_FILL);
718 				} else
719 					memset(dst, osp, sc->sectorsize);
720 			} else {
721 				if (notmapped) {
722 					error = md_malloc_move_ma(&m, &ma_offs,
723 					    sc->sectorsize, (void *)osp, 0,
724 					    MD_MALLOC_MOVE_READ);
725 				} else if (vlist != NULL) {
726 					error = md_malloc_move_vlist(&vlist,
727 					    &ma_offs, sc->sectorsize,
728 					    (void *)osp, 0,
729 					    MD_MALLOC_MOVE_READ);
730 				} else {
731 					bcopy((void *)osp, dst, sc->sectorsize);
732 					cpu_flush_dcache(dst, sc->sectorsize);
733 				}
734 			}
735 			osp = 0;
736 		} else if (bp->bio_cmd == BIO_WRITE) {
737 			if (sc->flags & MD_COMPRESS) {
738 				if (notmapped) {
739 					error1 = md_malloc_move_ma(&m, &ma_offs,
740 					    sc->sectorsize, &uc, 0,
741 					    MD_MALLOC_MOVE_CMP);
742 					i = error1 == 0 ? sc->sectorsize : 0;
743 				} else if (vlist != NULL) {
744 					error1 = md_malloc_move_vlist(&vlist,
745 					    &ma_offs, sc->sectorsize, &uc, 0,
746 					    MD_MALLOC_MOVE_CMP);
747 					i = error1 == 0 ? sc->sectorsize : 0;
748 				} else {
749 					uc = dst[0];
750 					for (i = 1; i < sc->sectorsize; i++) {
751 						if (dst[i] != uc)
752 							break;
753 					}
754 				}
755 			} else {
756 				i = 0;
757 				uc = 0;
758 			}
759 			if (i == sc->sectorsize) {
760 				if (osp != uc)
761 					error = s_write(sc->s_malloc.indir,
762 					    secno, uc);
763 			} else {
764 				if (osp <= 255) {
765 					sp = (uintptr_t)uma_zalloc(
766 					    sc->s_malloc.uma,
767 					    md_malloc_wait ? M_WAITOK :
768 					    M_NOWAIT);
769 					if (sp == 0) {
770 						error = ENOSPC;
771 						break;
772 					}
773 					if (notmapped) {
774 						error = md_malloc_move_ma(&m,
775 						    &ma_offs, sc->sectorsize,
776 						    (void *)sp, 0,
777 						    MD_MALLOC_MOVE_WRITE);
778 					} else if (vlist != NULL) {
779 						error = md_malloc_move_vlist(
780 						    &vlist, &ma_offs,
781 						    sc->sectorsize, (void *)sp,
782 						    0, MD_MALLOC_MOVE_WRITE);
783 					} else {
784 						bcopy(dst, (void *)sp,
785 						    sc->sectorsize);
786 					}
787 					error = s_write(sc->s_malloc.indir,
788 					    secno, sp);
789 				} else {
790 					if (notmapped) {
791 						error = md_malloc_move_ma(&m,
792 						    &ma_offs, sc->sectorsize,
793 						    (void *)osp, 0,
794 						    MD_MALLOC_MOVE_WRITE);
795 					} else if (vlist != NULL) {
796 						error = md_malloc_move_vlist(
797 						    &vlist, &ma_offs,
798 						    sc->sectorsize, (void *)osp,
799 						    0, MD_MALLOC_MOVE_WRITE);
800 					} else {
801 						bcopy(dst, (void *)osp,
802 						    sc->sectorsize);
803 					}
804 					osp = 0;
805 				}
806 			}
807 		} else {
808 			error = EOPNOTSUPP;
809 		}
810 		if (osp > 255)
811 			uma_zfree(sc->s_malloc.uma, (void*)osp);
812 		if (error != 0)
813 			break;
814 		secno++;
815 		if (!notmapped && vlist == NULL)
816 			dst += sc->sectorsize;
817 	}
818 	bp->bio_resid = 0;
819 	return (error);
820 }
821 
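/*
 * Helpers to copy a contiguous kernel buffer to and from a BIO_VLIST
 * segment list, starting at a byte offset into the list.
 */
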
822 static void
823 mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
824 {
825 	off_t seg_len;
826 
827 	while (offset >= vlist->ds_len) {
828 		offset -= vlist->ds_len;
829 		vlist++;
830 	}
831 
832 	while (len != 0) {
833 		seg_len = omin(len, vlist->ds_len - offset);
834 		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
835 		    seg_len);
836 		offset = 0;
837 		src = (uint8_t *)src + seg_len;
838 		len -= seg_len;
839 		vlist++;
840 	}
841 }
842 
843 static void
844 mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
845 {
846 	off_t seg_len;
847 
848 	while (offset >= vlist->ds_len) {
849 		offset -= vlist->ds_len;
850 		vlist++;
851 	}
852 
853 	while (len != 0) {
854 		seg_len = omin(len, vlist->ds_len - offset);
855 		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
856 		    seg_len);
857 		offset = 0;
858 		dst = (uint8_t *)dst + seg_len;
859 		len -= seg_len;
860 		vlist++;
861 	}
862 }
863 
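/*
 * I/O handler for MD_PRELOAD devices: the image is resident kernel
 * memory, so requests reduce to plain copies.
 */
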
864 static int
865 mdstart_preload(struct md_s *sc, struct bio *bp)
866 {
867 	uint8_t *p;
868 
869 	p = sc->s_preload.pl_ptr + bp->bio_offset;
870 	switch (bp->bio_cmd) {
871 	case BIO_READ:
872 		if ((bp->bio_flags & BIO_VLIST) != 0) {
873 			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
874 			    bp->bio_ma_offset, bp->bio_length);
875 		} else {
876 			bcopy(p, bp->bio_data, bp->bio_length);
877 		}
878 		cpu_flush_dcache(bp->bio_data, bp->bio_length);
879 		break;
880 	case BIO_WRITE:
881 		if ((bp->bio_flags & BIO_VLIST) != 0) {
882 			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
883 			    bp->bio_ma_offset, p, bp->bio_length);
884 		} else {
885 			bcopy(bp->bio_data, p, bp->bio_length);
886 		}
887 		break;
888 	}
889 	bp->bio_resid = 0;
890 	return (0);
891 }
892 
893 static int
894 mdstart_vnode(struct md_s *sc, struct bio *bp)
895 {
896 	int error;
897 	struct uio auio;
898 	struct iovec aiov;
899 	struct iovec *piov;
900 	struct mount *mp;
901 	struct vnode *vp;
902 	bus_dma_segment_t *vlist;
903 	struct thread *td;
904 	off_t iolen, iostart, off, len;
905 	int ma_offs, npages;
906 	bool mapped;
907 
908 	td = curthread;
909 	vp = sc->s_vnode.vnode;
910 	piov = NULL;
911 	ma_offs = bp->bio_ma_offset;
912 	off = bp->bio_offset;
913 	len = bp->bio_length;
914 	mapped = false;
915 
916 	/*
917 	 * VNODE I/O
918 	 *
919 	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write, anyway) the buffer is
921 	 * still valid.
922 	 */
923 
924 	switch (bp->bio_cmd) {
925 	case BIO_READ:
926 		auio.uio_rw = UIO_READ;
927 		break;
928 	case BIO_WRITE:
929 		auio.uio_rw = UIO_WRITE;
930 		break;
931 	case BIO_FLUSH:
932 		do {
933 			(void)vn_start_write(vp, &mp, V_WAIT);
934 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
935 			error = VOP_FSYNC(vp, MNT_WAIT, td);
936 			VOP_UNLOCK(vp);
937 			vn_finished_write(mp);
938 		} while (error == ERELOOKUP);
939 		return (error);
940 	case BIO_DELETE:
941 		if (sc->candelete) {
942 			error = vn_deallocate(vp, &off, &len, 0,
943 			    sc->flags & MD_ASYNC ? 0 : IO_SYNC,
944 			    sc->cred, NOCRED);
945 			bp->bio_resid = len;
946 			return (error);
947 		}
948 		/* FALLTHROUGH */
949 	default:
950 		return (EOPNOTSUPP);
951 	}
952 
953 	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
954 	auio.uio_resid = bp->bio_length;
955 	auio.uio_segflg = UIO_SYSSPACE;
956 	auio.uio_td = td;
957 
958 	if ((bp->bio_flags & BIO_VLIST) != 0) {
959 		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
960 		auio.uio_iov = piov;
961 		vlist = (bus_dma_segment_t *)bp->bio_data;
962 		while (len > 0) {
963 			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
964 			    ma_offs);
965 			piov->iov_len = vlist->ds_len - ma_offs;
966 			if (piov->iov_len > len)
967 				piov->iov_len = len;
968 			len -= piov->iov_len;
969 			ma_offs = 0;
970 			vlist++;
971 			piov++;
972 		}
973 		auio.uio_iovcnt = piov - auio.uio_iov;
974 		piov = auio.uio_iov;
975 	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
976 		bp->bio_resid = len;
977 unmapped_step:
978 		npages = atop(min(maxphys, round_page(len + (ma_offs &
979 		    PAGE_MASK))));
980 		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
981 		KASSERT(iolen > 0, ("zero iolen"));
982 		KASSERT(npages <= atop(maxphys + PAGE_SIZE),
983 		    ("npages %d too large", npages));
984 		pmap_qenter(sc->s_vnode.kva, &bp->bio_ma[atop(ma_offs)],
985 		    npages);
986 		aiov.iov_base = sc->s_vnode.kva + (ma_offs & PAGE_MASK);
987 		aiov.iov_len = iolen;
988 		auio.uio_iov = &aiov;
989 		auio.uio_iovcnt = 1;
990 		auio.uio_resid = iolen;
991 		mapped = true;
992 	} else {
993 		aiov.iov_base = bp->bio_data;
994 		aiov.iov_len = bp->bio_length;
995 		auio.uio_iov = &aiov;
996 		auio.uio_iovcnt = 1;
997 	}
998 	iostart = auio.uio_offset;
999 	if (bp->bio_cmd == BIO_READ) {
1000 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1001 		error = VOP_READ(vp, &auio, 0, sc->cred);
1002 		VOP_UNLOCK(vp);
1003 	} else {
1004 		(void) vn_start_write(vp, &mp, V_WAIT);
1005 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1006 		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
1007 		    sc->cred);
1008 		VOP_UNLOCK(vp);
1009 		vn_finished_write(mp);
1010 		if (error == 0)
1011 			sc->flags &= ~MD_VERIFY;
1012 	}
1013 
1014 	/* When MD_CACHE is set, try to avoid double-caching the data. */
1015 	if (error == 0 && (sc->flags & MD_CACHE) == 0)
1016 		VOP_ADVISE(vp, iostart, auio.uio_offset - 1,
1017 		    POSIX_FADV_DONTNEED);
1018 
1019 	if (mapped) {
1020 		pmap_qremove(sc->s_vnode.kva, npages);
1021 		if (error == 0) {
1022 			len -= iolen;
1023 			bp->bio_resid -= iolen;
1024 			ma_offs += iolen;
1025 			if (len > 0)
1026 				goto unmapped_step;
1027 		}
1028 	} else {
1029 		bp->bio_resid = auio.uio_resid;
1030 	}
1031 
1032 	free(piov, M_MD);
1033 	return (error);
1034 }
1035 
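/*
 * I/O handler for MD_SWAP devices.  Data lives in the pages of an
 * anonymous swap-backed VM object; each page is grabbed busied, paged
 * in from swap when needed, and copied to or from the bio.
 */
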
1036 static int
1037 mdstart_swap(struct md_s *sc, struct bio *bp)
1038 {
1039 	vm_page_t m;
1040 	u_char *p;
1041 	vm_pindex_t i, lastp;
1042 	bus_dma_segment_t *vlist;
1043 	int rv, ma_offs, offs, len, lastend;
1044 
1045 	switch (bp->bio_cmd) {
1046 	case BIO_READ:
1047 	case BIO_WRITE:
1048 	case BIO_DELETE:
1049 		break;
1050 	case BIO_FLUSH:
1051 		return (0);
1052 	default:
1053 		return (EOPNOTSUPP);
1054 	}
1055 
1056 	p = bp->bio_data;
1057 	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
1058 	    bp->bio_ma_offset : 0;
1059 	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
1060 	    (bus_dma_segment_t *)bp->bio_data : NULL;
1061 
1062 	/*
1063 	 * offs is the offset at which to start operating on the
	 * next (i.e., first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (i.e., PAGE_SIZE if
1067 	 * we're operating on complete aligned pages).
1068 	 */
1069 	offs = bp->bio_offset % PAGE_SIZE;
1070 	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
1071 	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
1072 
1073 	rv = VM_PAGER_OK;
1074 	vm_object_pip_add(sc->s_swap.object, 1);
1075 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
1076 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
1077 		m = vm_page_grab_unlocked(sc->s_swap.object, i,
1078 		    VM_ALLOC_SYSTEM);
1079 		if (bp->bio_cmd == BIO_READ) {
1080 			if (vm_page_all_valid(m))
1081 				rv = VM_PAGER_OK;
1082 			else
1083 				rv = vm_pager_get_pages(sc->s_swap.object,
1084 				    &m, 1, NULL, NULL);
1085 			if (rv == VM_PAGER_ERROR) {
1086 				VM_OBJECT_WLOCK(sc->s_swap.object);
1087 				vm_page_free(m);
1088 				VM_OBJECT_WUNLOCK(sc->s_swap.object);
1089 				break;
1090 			} else if (rv == VM_PAGER_FAIL) {
1091 				/*
1092 				 * Pager does not have the page.  Zero
1093 				 * the allocated page, and mark it as
				 * valid.  Do not mark it dirty; the page
				 * can be recreated if it is thrown out.
1096 				 */
1097 				pmap_zero_page(m);
1098 				vm_page_valid(m);
1099 			}
1100 			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
1101 				pmap_copy_pages(&m, offs, bp->bio_ma,
1102 				    ma_offs, len);
1103 			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
1104 				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
1105 				    vlist, ma_offs, len);
1106 				cpu_flush_dcache(p, len);
1107 			} else {
1108 				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
1109 				cpu_flush_dcache(p, len);
1110 			}
1111 		} else if (bp->bio_cmd == BIO_WRITE) {
1112 			if (len == PAGE_SIZE || vm_page_all_valid(m))
1113 				rv = VM_PAGER_OK;
1114 			else
1115 				rv = vm_pager_get_pages(sc->s_swap.object,
1116 				    &m, 1, NULL, NULL);
1117 			if (rv == VM_PAGER_ERROR) {
1118 				VM_OBJECT_WLOCK(sc->s_swap.object);
1119 				vm_page_free(m);
1120 				VM_OBJECT_WUNLOCK(sc->s_swap.object);
1121 				break;
1122 			} else if (rv == VM_PAGER_FAIL)
1123 				pmap_zero_page(m);
1124 
1125 			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
1126 				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
1127 				    offs, len);
1128 			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
1129 				physcopyin_vlist(vlist, ma_offs,
1130 				    VM_PAGE_TO_PHYS(m) + offs, len);
1131 			} else {
1132 				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
1133 			}
1134 
1135 			vm_page_valid(m);
1136 			vm_page_set_dirty(m);
1137 		} else if (bp->bio_cmd == BIO_DELETE) {
1138 			if (len == PAGE_SIZE || vm_page_all_valid(m))
1139 				rv = VM_PAGER_OK;
1140 			else
1141 				rv = vm_pager_get_pages(sc->s_swap.object,
1142 				    &m, 1, NULL, NULL);
1143 			VM_OBJECT_WLOCK(sc->s_swap.object);
1144 			if (rv == VM_PAGER_ERROR) {
1145 				vm_page_free(m);
1146 				VM_OBJECT_WUNLOCK(sc->s_swap.object);
1147 				break;
1148 			} else if (rv == VM_PAGER_FAIL) {
1149 				vm_page_free(m);
1150 				m = NULL;
1151 			} else {
1152 				/* Page is valid. */
1153 				if (len != PAGE_SIZE) {
1154 					pmap_zero_page_area(m, offs, len);
1155 					vm_page_set_dirty(m);
1156 				} else {
1157 					vm_pager_page_unswapped(m);
1158 					vm_page_free(m);
1159 					m = NULL;
1160 				}
1161 			}
1162 			VM_OBJECT_WUNLOCK(sc->s_swap.object);
1163 		}
1164 		if (m != NULL) {
1165 			/*
1166 			 * The page may be deactivated prior to setting
1167 			 * PGA_REFERENCED, but in this case it will be
1168 			 * reactivated by the page daemon.
1169 			 */
1170 			if (vm_page_active(m))
1171 				vm_page_reference(m);
1172 			else
1173 				vm_page_activate(m);
1174 			vm_page_xunbusy(m);
1175 		}
1176 
1177 		/* Actions on further pages start at offset 0 */
1178 		p += PAGE_SIZE - offs;
1179 		offs = 0;
1180 		ma_offs += len;
1181 	}
1182 	vm_object_pip_wakeup(sc->s_swap.object);
1183 	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
1184 }
1185 
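/*
 * I/O handler for MD_NULL devices: reads return zeros and writes are
 * discarded.
 */
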
1186 static int
1187 mdstart_null(struct md_s *sc, struct bio *bp)
1188 {
1189 
1190 	switch (bp->bio_cmd) {
1191 	case BIO_READ:
1192 		bzero(bp->bio_data, bp->bio_length);
1193 		cpu_flush_dcache(bp->bio_data, bp->bio_length);
1194 		break;
1195 	case BIO_WRITE:
1196 		break;
1197 	}
1198 	bp->bio_resid = 0;
1199 	return (0);
1200 }
1201 
1202 static void
1203 md_handleattr(struct md_s *sc, struct bio *bp)
1204 {
1205 	if (sc->fwsectors && sc->fwheads &&
1206 	    (g_handleattr_int(bp, "GEOM::fwsectors", sc->fwsectors) != 0 ||
1207 	    g_handleattr_int(bp, "GEOM::fwheads", sc->fwheads) != 0))
1208 		return;
1209 	if (g_handleattr_int(bp, "GEOM::candelete", sc->candelete) != 0)
1210 		return;
1211 	if (sc->ident[0] != '\0' &&
1212 	    g_handleattr_str(bp, "GEOM::ident", sc->ident) != 0)
1213 		return;
1214 	if (g_handleattr_int(bp, "MNT::verified", (sc->flags & MD_VERIFY) != 0))
1215 		return;
1216 	g_io_deliver(bp, EOPNOTSUPP);
1217 }
1218 
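/*
 * Per-device worker thread: takes bios off the queue, dispatches them
 * to the type-specific start routine, and delivers the results.
 */
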
1219 static void
1220 md_kthread(void *arg)
1221 {
1222 	struct md_s *sc;
1223 	struct bio *bp;
1224 	int error;
1225 
1226 	sc = arg;
1227 	thread_lock(curthread);
1228 	sched_prio(curthread, PRIBIO);
1229 	thread_unlock(curthread);
1230 	if (sc->type == MD_VNODE)
1231 		curthread->td_pflags |= TDP_NORUNNINGBUF;
1232 
1233 	for (;;) {
1234 		mtx_lock(&sc->queue_mtx);
1235 		if (sc->flags & MD_SHUTDOWN) {
1236 			sc->flags |= MD_EXITING;
1237 			mtx_unlock(&sc->queue_mtx);
1238 			kproc_exit(0);
1239 		}
1240 		bp = bioq_takefirst(&sc->bio_queue);
1241 		if (!bp) {
1242 			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
1243 			continue;
1244 		}
1245 		mtx_unlock(&sc->queue_mtx);
1246 		if (bp->bio_cmd == BIO_GETATTR) {
1247 			md_handleattr(sc, bp);
1248 		} else {
1249 			error = sc->start(sc, bp);
1250 			if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
1251 				/*
1252 				 * Devstat uses (bio_bcount, bio_resid) for
1253 				 * determining the length of the completed part
1254 				 * of the i/o.  g_io_deliver() will translate
1255 				 * from bio_completed to that, but it also
1256 				 * destroys the bio so we must do our own
1257 				 * translation.
1258 				 */
1259 				bp->bio_bcount = bp->bio_length;
1260 				devstat_end_transaction_bio(sc->devstat, bp);
1261 			}
1262 			bp->bio_completed = bp->bio_length - bp->bio_resid;
1263 			g_io_deliver(bp, error);
1264 		}
1265 	}
1266 }
1267 
1268 static struct md_s *
1269 mdfind(int unit)
1270 {
1271 	struct md_s *sc;
1272 
1273 	LIST_FOREACH(sc, &md_softc_list, list) {
1274 		if (sc->unit == unit)
1275 			break;
1276 	}
1277 	return (sc);
1278 }
1279 
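/*
 * Allocate a softc and unit number for a new device and start its
 * worker thread.  Pass unit == -1 to pick the next free unit.
 */
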
1280 static struct md_s *
1281 mdnew(int unit, int *errp, enum md_types type)
1282 {
1283 	struct md_s *sc;
1284 	int error;
1285 
1286 	*errp = 0;
1287 	if (unit == -1)
1288 		unit = alloc_unr(md_uh);
1289 	else
1290 		unit = alloc_unr_specific(md_uh, unit);
1291 
1292 	if (unit == -1) {
1293 		*errp = EBUSY;
1294 		return (NULL);
1295 	}
1296 
1297 	sc = malloc(sizeof(*sc), M_MD, M_WAITOK | M_ZERO);
1298 	sc->type = type;
1299 	bioq_init(&sc->bio_queue);
1300 	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
1301 	sc->unit = unit;
1302 	sprintf(sc->name, "md%d", unit);
1303 	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
1305 	if (error == 0)
1306 		return (sc);
1307 	LIST_REMOVE(sc, list);
1308 	mtx_destroy(&sc->queue_mtx);
1309 	free_unr(md_uh, sc->unit);
1310 	free(sc, M_MD);
1311 	*errp = error;
1312 	return (NULL);
1313 }
1314 
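/*
 * Create the GEOM geom, provider, and devstat entry for a configured
 * device and make it available to the rest of the system.
 */
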
1315 static void
1316 mdinit(struct md_s *sc)
1317 {
1318 	struct g_geom *gp;
1319 	struct g_provider *pp;
1320 	unsigned remn;
1321 
1322 	g_topology_lock();
1323 	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
1324 	gp->softc = sc;
1325 	pp = g_new_providerf(gp, "md%d", sc->unit);
1326 	devstat_remove_entry(pp->stat);
1327 	pp->stat = NULL;
1328 	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
1329 	/* Prune off any residual fractional sector. */
1330 	remn = sc->mediasize % sc->sectorsize;
1331 	if (remn != 0) {
1332 		printf("md%d: truncating fractional last sector by %u bytes\n",
1333 		    sc->unit, remn);
1334 		sc->mediasize -= remn;
1335 	}
1336 	pp->mediasize = sc->mediasize;
1337 	pp->sectorsize = sc->sectorsize;
1338 	switch (sc->type) {
1339 	case MD_MALLOC:
1340 	case MD_VNODE:
1341 	case MD_SWAP:
1342 		pp->flags |= G_PF_ACCEPT_UNMAPPED;
1343 		break;
1344 	case MD_PRELOAD:
1345 	case MD_NULL:
1346 		break;
1347 	}
1348 	sc->gp = gp;
1349 	sc->pp = pp;
1350 	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
1351 	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
1352 	sc->devstat->id = pp;
1353 	g_error_provider(pp, 0);
1354 	g_topology_unlock();
1355 }
1356 
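/*
 * Set up an MD_MALLOC device: create the sector UMA zone and the
 * indirection tree, preallocating every sector if MD_RESERVE is set.
 */
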
1357 static int
1358 mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
1359 {
1360 	uintptr_t sp;
1361 	int error;
1362 	off_t u;
1363 
1364 	error = 0;
1365 	if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
1366 		return (EINVAL);
1367 	if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize))
1368 		return (EINVAL);
1369 	/* Compression doesn't make sense if we have reserved space */
1370 	if (mdr->md_options & MD_RESERVE)
1371 		mdr->md_options &= ~MD_COMPRESS;
1372 	if (mdr->md_fwsectors != 0)
1373 		sc->fwsectors = mdr->md_fwsectors;
1374 	if (mdr->md_fwheads != 0)
1375 		sc->fwheads = mdr->md_fwheads;
1376 	sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE | MD_RESERVE);
1377 	sc->s_malloc.indir = dimension(sc->mediasize / sc->sectorsize);
1378 	sc->s_malloc.uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL,
1379 	    NULL, NULL, 0x1ff, 0);
1380 	if (mdr->md_options & MD_RESERVE) {
1381 		off_t nsectors;
1382 
1383 		nsectors = sc->mediasize / sc->sectorsize;
1384 		for (u = 0; u < nsectors; u++) {
1385 			sp = (uintptr_t)uma_zalloc(sc->s_malloc.uma,
1386 			    (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
1387 			if (sp != 0)
1388 				error = s_write(sc->s_malloc.indir, u, sp);
1389 			else
1390 				error = ENOMEM;
1391 			if (error != 0)
1392 				break;
1393 		}
1394 	}
1395 	return (error);
1396 }
1397 
1398 static int
1399 mdsetcred(struct md_s *sc, struct ucred *cred)
1400 {
1401 	char *tmpbuf;
1402 	int error = 0;
1403 
1404 	/*
	 * Set credentials in our softc.
1406 	 */
1407 
1408 	if (sc->cred)
1409 		crfree(sc->cred);
1410 	sc->cred = crhold(cred);
1411 
1412 	/*
	 * XXX: Horrible kludge to establish credentials for NFS.
1414 	 */
1415 
1416 	if (sc->type == MD_VNODE && sc->s_vnode.vnode != NULL) {
1417 		struct uio auio;
1418 		struct iovec aiov;
1419 
1420 		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
1421 		bzero(&auio, sizeof(auio));
1422 
1423 		aiov.iov_base = tmpbuf;
1424 		aiov.iov_len = sc->sectorsize;
1425 		auio.uio_iov = &aiov;
1426 		auio.uio_iovcnt = 1;
1427 		auio.uio_offset = 0;
1428 		auio.uio_rw = UIO_READ;
1429 		auio.uio_segflg = UIO_SYSSPACE;
1430 		auio.uio_resid = aiov.iov_len;
1431 		vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
1432 		error = VOP_READ(sc->s_vnode.vnode, &auio, 0, sc->cred);
1433 		VOP_UNLOCK(sc->s_vnode.vnode);
1434 		free(tmpbuf, M_TEMP);
1435 	}
1436 	return (error);
1437 }
1438 
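/*
 * Open and validate the backing file for an MD_VNODE device, and
 * reserve KVA for servicing unmapped bios.
 */
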
1439 static int
1440 mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
1441 {
1442 	struct vattr vattr;
1443 	struct nameidata nd;
1444 	char *fname;
1445 	int error, flags;
1446 	long v;
1447 
1448 	fname = mdr->md_file;
1449 	if (mdr->md_file_seg == UIO_USERSPACE) {
1450 		error = copyinstr(fname, sc->s_vnode.file,
1451 		    sizeof(sc->s_vnode.file), NULL);
1452 		if (error != 0)
1453 			return (error);
1454 	} else if (mdr->md_file_seg == UIO_SYSSPACE)
1455 		strlcpy(sc->s_vnode.file, fname, sizeof(sc->s_vnode.file));
1456 	else
1457 		return (EDOOFUS);
1458 
1459 	/*
	 * If the user specified that this is a read-only device, don't
1461 	 * set the FWRITE mask before trying to open the backing store.
1462 	 */
	flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) |
	    ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
1465 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->s_vnode.file);
1466 	error = vn_open(&nd, &flags, 0, NULL);
1467 	if (error != 0)
1468 		return (error);
1469 	NDFREE_PNBUF(&nd);
1470 	if (nd.ni_vp->v_type != VREG) {
1471 		error = EINVAL;
1472 		goto bad;
1473 	}
1474 	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
1475 	if (error != 0)
1476 		goto bad;
1477 	if ((mdr->md_options & MD_MUSTDEALLOC) != 0) {
1478 		error = VOP_PATHCONF(nd.ni_vp, _PC_DEALLOC_PRESENT, &v);
1479 		if (error != 0)
1480 			goto bad;
1481 		if (v == 0)
1482 			sc->candelete = false;
1483 	}
1484 	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
1485 		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
1486 		if (VN_IS_DOOMED(nd.ni_vp)) {
1487 			/* Forced unmount. */
1488 			error = EBADF;
1489 			goto bad;
1490 		}
1491 	}
1492 	nd.ni_vp->v_vflag |= VV_MD;
1493 	VOP_UNLOCK(nd.ni_vp);
1494 
1495 	if (mdr->md_fwsectors != 0)
1496 		sc->fwsectors = mdr->md_fwsectors;
1497 	if (mdr->md_fwheads != 0)
1498 		sc->fwheads = mdr->md_fwheads;
1499 	snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
1500 	    (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
1501 	sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE |
1502 	    MD_VERIFY | MD_MUSTDEALLOC);
1503 	if (!(flags & FWRITE))
1504 		sc->flags |= MD_READONLY;
1505 	sc->s_vnode.vnode = nd.ni_vp;
1506 
1507 	error = mdsetcred(sc, td->td_ucred);
1508 	if (error != 0) {
1509 		sc->s_vnode.vnode = NULL;
1510 		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
1511 		nd.ni_vp->v_vflag &= ~VV_MD;
1512 		goto bad;
1513 	}
1514 
1515 	sc->s_vnode.kva = kva_alloc(maxphys + PAGE_SIZE);
1516 	return (0);
1517 bad:
1518 	VOP_UNLOCK(nd.ni_vp);
1519 	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
1520 	return (error);
1521 }
1522 
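/*
 * GEOM calls this once the provider has been fully withdrawn; wake up
 * mddestroy(), which waits for it before tearing down the softc.
 */
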
1523 static void
1524 g_md_providergone(struct g_provider *pp)
1525 {
1526 	struct md_s *sc = pp->geom->softc;
1527 
1528 	mtx_lock(&sc->queue_mtx);
1529 	sc->flags |= MD_PROVIDERGONE;
1530 	wakeup(&sc->flags);
1531 	mtx_unlock(&sc->queue_mtx);
1532 }
1533 
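/*
 * Tear down a device: wither the geom, wait for the provider to go
 * away and the worker thread to exit, then release the type-specific
 * backing store.
 */
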
1534 static int
1535 mddestroy(struct md_s *sc, struct thread *td)
1536 {
1537 
1538 	if (sc->gp) {
1539 		g_topology_lock();
1540 		g_wither_geom(sc->gp, ENXIO);
1541 		g_topology_unlock();
1542 
1543 		mtx_lock(&sc->queue_mtx);
1544 		while (!(sc->flags & MD_PROVIDERGONE))
1545 			msleep(&sc->flags, &sc->queue_mtx, PRIBIO, "mddestroy", 0);
1546 		mtx_unlock(&sc->queue_mtx);
1547 	}
1548 	if (sc->devstat) {
1549 		devstat_remove_entry(sc->devstat);
1550 		sc->devstat = NULL;
1551 	}
1552 	mtx_lock(&sc->queue_mtx);
1553 	sc->flags |= MD_SHUTDOWN;
1554 	wakeup(sc);
1555 	while (!(sc->flags & MD_EXITING))
1556 		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
1557 	mtx_unlock(&sc->queue_mtx);
1558 	mtx_destroy(&sc->queue_mtx);
1559 	switch (sc->type) {
1560 	case MD_VNODE:
1561 		if (sc->s_vnode.vnode != NULL) {
1562 			vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
1563 			sc->s_vnode.vnode->v_vflag &= ~VV_MD;
1564 			VOP_UNLOCK(sc->s_vnode.vnode);
1565 			(void)vn_close(sc->s_vnode.vnode,
1566 			    sc->flags & MD_READONLY ?  FREAD : (FREAD|FWRITE),
1567 			    sc->cred, td);
1568 		}
1569 		if (sc->s_vnode.kva != NULL)
1570 			kva_free(sc->s_vnode.kva, maxphys + PAGE_SIZE);
1571 		break;
1572 	case MD_SWAP:
1573 		if (sc->s_swap.object != NULL)
1574 			vm_object_deallocate(sc->s_swap.object);
1575 		break;
1576 	case MD_MALLOC:
1577 		if (sc->s_malloc.indir != NULL)
1578 			destroy_indir(sc, sc->s_malloc.indir);
1579 		if (sc->s_malloc.uma != NULL)
1580 			uma_zdestroy(sc->s_malloc.uma);
1581 		break;
1582 	case MD_PRELOAD:
1583 	case MD_NULL:
1584 		break;
1585 	default:
1586 		__assert_unreachable();
1587 	}
1588 	if (sc->cred != NULL)
1589 		crfree(sc->cred);
1590 
1591 	LIST_REMOVE(sc, list);
1592 	free_unr(md_uh, sc->unit);
1593 	free(sc, M_MD);
1594 	return (0);
1595 }
1596 
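/*
 * Grow or shrink a device.  For MD_SWAP the backing VM object must be
 * resized and swap accounting adjusted; MD_VNODE and MD_NULL only need
 * the provider updated.
 */
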
1597 static int
1598 mdresize(struct md_s *sc, struct md_req *mdr)
1599 {
1600 	int error, res;
1601 	vm_pindex_t oldpages, newpages;
1602 
1603 	switch (sc->type) {
1604 	case MD_VNODE:
1605 	case MD_NULL:
1606 		break;
1607 	case MD_SWAP:
1608 		if (mdr->md_mediasize <= 0 ||
1609 		    (mdr->md_mediasize % PAGE_SIZE) != 0)
1610 			return (EDOM);
1611 		oldpages = OFF_TO_IDX(sc->mediasize);
1612 		newpages = OFF_TO_IDX(mdr->md_mediasize);
1613 		if (newpages < oldpages) {
1614 			VM_OBJECT_WLOCK(sc->s_swap.object);
1615 			vm_object_page_remove(sc->s_swap.object, newpages,
1616 			    0, 0);
1617 			swap_release_by_cred(IDX_TO_OFF(oldpages -
1618 			    newpages), sc->cred);
1619 			sc->s_swap.object->size = newpages;
1620 			VM_OBJECT_WUNLOCK(sc->s_swap.object);
1621 		} else if (newpages > oldpages) {
1622 			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
1623 			    oldpages), sc->cred);
1624 			if (!res)
1625 				return (ENOMEM);
1626 			if ((mdr->md_options & MD_RESERVE) ||
1627 			    (sc->flags & MD_RESERVE)) {
1628 				error = swap_pager_reserve(sc->s_swap.object,
1629 				    oldpages, newpages - oldpages);
1630 				if (error < 0) {
1631 					swap_release_by_cred(
1632 					    IDX_TO_OFF(newpages - oldpages),
1633 					    sc->cred);
1634 					return (EDOM);
1635 				}
1636 			}
1637 			VM_OBJECT_WLOCK(sc->s_swap.object);
1638 			sc->s_swap.object->size = newpages;
1639 			VM_OBJECT_WUNLOCK(sc->s_swap.object);
1640 		}
1641 		break;
1642 	default:
1643 		return (EOPNOTSUPP);
1644 	}
1645 
1646 	sc->mediasize = mdr->md_mediasize;
1647 
1648 	g_topology_lock();
1649 	g_resize_provider(sc->pp, sc->mediasize);
1650 	g_topology_unlock();
1651 	return (0);
1652 }
1653 
1654 static int
1655 mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
1656 {
1657 	vm_ooffset_t npage;
1658 	int error;
1659 
1660 	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
1663 	 */
1664 	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
1665 		return (EDOM);
1666 
1667 	/*
1668 	 * Allocate an OBJT_SWAP object.
1669 	 *
	 * Note that the size is truncated down to a whole number of pages.
1671 	 */
1672 
1673 	if ((mdr->md_options & MD_VERIFY) != 0)
1674 		return (EINVAL);
1675 	npage = mdr->md_mediasize / PAGE_SIZE;
1676 	if (mdr->md_fwsectors != 0)
1677 		sc->fwsectors = mdr->md_fwsectors;
1678 	if (mdr->md_fwheads != 0)
1679 		sc->fwheads = mdr->md_fwheads;
1680 	sc->s_swap.object = vm_pager_allocate(OBJT_SWAP, NULL,
1681 	    PAGE_SIZE * npage, VM_PROT_DEFAULT, 0, td->td_ucred);
1682 	if (sc->s_swap.object == NULL)
1683 		return (ENOMEM);
1684 	sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
1685 	if (mdr->md_options & MD_RESERVE) {
1686 		if (swap_pager_reserve(sc->s_swap.object, 0, npage) < 0) {
1687 			error = EDOM;
1688 			goto finish;
1689 		}
1690 	}
1691 	error = mdsetcred(sc, td->td_ucred);
1692  finish:
1693 	if (error != 0) {
1694 		vm_object_deallocate(sc->s_swap.object);
1695 		sc->s_swap.object = NULL;
1696 	}
1697 	return (error);
1698 }
1699 
1700 static int
1701 mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td)
1702 {
1703 
1704 	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
1707 	 */
1708 	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
1709 		return (EDOM);
1710 
1711 	return (0);
1712 }
1713 
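/*
 * Validate an attach request and create the device via the
 * type-specific constructor.
 */
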
1714 static int
1715 kern_mdattach_locked(struct thread *td, struct md_req *mdr)
1716 {
1717 	struct md_s *sc;
1718 	unsigned sectsize;
1719 	int error;
1720 
1721 	sx_assert(&md_sx, SA_XLOCKED);
1722 
1723 	switch (mdr->md_type) {
1724 	case MD_MALLOC:
1725 	case MD_PRELOAD:
1726 	case MD_VNODE:
1727 	case MD_SWAP:
1728 	case MD_NULL:
1729 		break;
1730 	default:
1731 		return (EINVAL);
1732 	}
1733 	if (mdr->md_sectorsize == 0)
1734 		sectsize = DEV_BSIZE;
1735 	else
1736 		sectsize = mdr->md_sectorsize;
1737 	if (sectsize > maxphys || mdr->md_mediasize < sectsize)
1738 		return (EINVAL);
1739 	if (mdr->md_options & MD_AUTOUNIT)
1740 		sc = mdnew(-1, &error, mdr->md_type);
1741 	else {
1742 		if (mdr->md_unit > INT_MAX)
1743 			return (EINVAL);
1744 		sc = mdnew(mdr->md_unit, &error, mdr->md_type);
1745 	}
1746 	if (sc == NULL)
1747 		return (error);
1748 	if (mdr->md_label != NULL)
1749 		error = copyinstr(mdr->md_label, sc->label,
1750 		    sizeof(sc->label), NULL);
1751 	if (error != 0)
1752 		goto err_after_new;
1753 	if (mdr->md_options & MD_AUTOUNIT)
1754 		mdr->md_unit = sc->unit;
1755 	sc->mediasize = mdr->md_mediasize;
1756 	sc->sectorsize = sectsize;
1757 	sc->candelete = true;
1758 	error = EDOOFUS;
1759 	switch (sc->type) {
1760 	case MD_MALLOC:
1761 		sc->start = mdstart_malloc;
1762 		error = mdcreate_malloc(sc, mdr);
1763 		break;
1764 	case MD_PRELOAD:
1765 		/*
1766 		 * We disallow attaching preloaded memory disks via
1767 		 * ioctl. Preloaded memory disks are automatically
1768 		 * attached in g_md_init().
1769 		 */
1770 		error = EOPNOTSUPP;
1771 		break;
1772 	case MD_VNODE:
1773 		sc->start = mdstart_vnode;
1774 		error = mdcreate_vnode(sc, mdr, td);
1775 		break;
1776 	case MD_SWAP:
1777 		sc->start = mdstart_swap;
1778 		error = mdcreate_swap(sc, mdr, td);
1779 		break;
1780 	case MD_NULL:
1781 		sc->start = mdstart_null;
1782 		error = mdcreate_null(sc, mdr, td);
1783 		break;
1784 	}
1785 err_after_new:
1786 	if (error != 0) {
1787 		mddestroy(sc, td);
1788 		return (error);
1789 	}
1790 
1791 	mdinit(sc);
1792 	return (0);
1793 }
1794 
1795 static int
1796 kern_mdattach(struct thread *td, struct md_req *mdr)
1797 {
1798 	int error;
1799 
1800 	sx_xlock(&md_sx);
1801 	error = kern_mdattach_locked(td, mdr);
1802 	sx_xunlock(&md_sx);
1803 	return (error);
1804 }
1805 
1806 static int
1807 kern_mddetach_locked(struct thread *td, struct md_req *mdr)
1808 {
1809 	struct md_s *sc;
1810 
1811 	sx_assert(&md_sx, SA_XLOCKED);
1812 
1813 	if (mdr->md_mediasize != 0 ||
1814 	    (mdr->md_options & ~MD_FORCE) != 0)
1815 		return (EINVAL);
1816 
1817 	sc = mdfind(mdr->md_unit);
1818 	if (sc == NULL)
1819 		return (ENOENT);
1820 	if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
1821 	    !(mdr->md_options & MD_FORCE))
1822 		return (EBUSY);
1823 	return (mddestroy(sc, td));
1824 }
1825 
1826 static int
1827 kern_mddetach(struct thread *td, struct md_req *mdr)
1828 {
1829 	int error;
1830 
1831 	sx_xlock(&md_sx);
1832 	error = kern_mddetach_locked(td, mdr);
1833 	sx_xunlock(&md_sx);
1834 	return (error);
1835 }
1836 
1837 static int
1838 kern_mdresize_locked(struct md_req *mdr)
1839 {
1840 	struct md_s *sc;
1841 
1842 	sx_assert(&md_sx, SA_XLOCKED);
1843 
1844 	if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
1845 		return (EINVAL);
1846 
1847 	sc = mdfind(mdr->md_unit);
1848 	if (sc == NULL)
1849 		return (ENOENT);
1850 	if (mdr->md_mediasize < sc->sectorsize)
1851 		return (EINVAL);
1852 	mdr->md_mediasize -= mdr->md_mediasize % sc->sectorsize;
1853 	if (mdr->md_mediasize < sc->mediasize &&
1854 	    !(sc->flags & MD_FORCE) &&
1855 	    !(mdr->md_options & MD_FORCE))
1856 		return (EBUSY);
1857 	return (mdresize(sc, mdr));
1858 }
1859 
1860 static int
1861 kern_mdresize(struct md_req *mdr)
1862 {
1863 	int error;
1864 
1865 	sx_xlock(&md_sx);
1866 	error = kern_mdresize_locked(mdr);
1867 	sx_xunlock(&md_sx);
1868 	return (error);
1869 }
1870 
1871 static int
1872 kern_mdquery_locked(struct md_req *mdr)
1873 {
1874 	struct md_s *sc;
1875 	int error;
1876 
1877 	sx_assert(&md_sx, SA_XLOCKED);
1878 
1879 	sc = mdfind(mdr->md_unit);
1880 	if (sc == NULL)
1881 		return (ENOENT);
1882 	mdr->md_type = sc->type;
1883 	mdr->md_options = sc->flags;
1884 	mdr->md_mediasize = sc->mediasize;
1885 	mdr->md_sectorsize = sc->sectorsize;
1886 	error = 0;
1887 	if (mdr->md_label != NULL) {
1888 		error = copyout(sc->label, mdr->md_label,
1889 		    strlen(sc->label) + 1);
1890 		if (error != 0)
1891 			return (error);
1892 	}
1893 	if (sc->type == MD_VNODE) {
1894 		error = copyout(sc->s_vnode.file, mdr->md_file,
1895 		    strlen(sc->s_vnode.file) + 1);
1896 	} else if (sc->type == MD_PRELOAD && mdr->md_file != NULL) {
1897 		error = copyout(sc->s_preload.name, mdr->md_file,
1898 		    strlen(sc->s_preload.name) + 1);
1899 	}
1900 	return (error);
1901 }
1902 
1903 static int
1904 kern_mdquery(struct md_req *mdr)
1905 {
1906 	int error;
1907 
1908 	sx_xlock(&md_sx);
1909 	error = kern_mdquery_locked(mdr);
1910 	sx_xunlock(&md_sx);
1911 	return (error);
1912 }
1913 
1914 /* Copy members that are not userspace pointers. */
1915 #define	MD_IOCTL2REQ(mdio, mdr) do {					\
1916 	(mdr)->md_unit = (mdio)->md_unit;				\
1917 	(mdr)->md_type = (mdio)->md_type;				\
1918 	(mdr)->md_mediasize = (mdio)->md_mediasize;			\
1919 	(mdr)->md_sectorsize = (mdio)->md_sectorsize;			\
1920 	(mdr)->md_options = (mdio)->md_options;				\
1921 	(mdr)->md_fwheads = (mdio)->md_fwheads;				\
1922 	(mdr)->md_fwsectors = (mdio)->md_fwsectors;			\
1923 	(mdr)->md_units = &(mdio)->md_pad[0];				\
1924 	(mdr)->md_units_nitems = nitems((mdio)->md_pad);		\
1925 } while(0)
1926 
1927 /* Copy members that might have been updated */
1928 #define MD_REQ2IOCTL(mdr, mdio) do {					\
1929 	(mdio)->md_unit = (mdr)->md_unit;				\
1930 	(mdio)->md_type = (mdr)->md_type;				\
1931 	(mdio)->md_mediasize = (mdr)->md_mediasize;			\
1932 	(mdio)->md_sectorsize = (mdr)->md_sectorsize;			\
1933 	(mdio)->md_options = (mdr)->md_options;				\
1934 	(mdio)->md_fwheads = (mdr)->md_fwheads;				\
1935 	(mdio)->md_fwsectors = (mdr)->md_fwsectors;			\
1936 } while(0)
1937 
1938 static int
1939 mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1940     struct thread *td)
1941 {
1942 	struct md_req mdr;
1943 	int error;
1944 
1945 	if (md_debug)
1946 		printf("mdctlioctl(%s %lx %p %x %p)\n",
1947 			devtoname(dev), cmd, addr, flags, td);
1948 
1949 	bzero(&mdr, sizeof(mdr));
1950 	switch (cmd) {
1951 	case MDIOCATTACH:
1952 	case MDIOCDETACH:
1953 	case MDIOCRESIZE:
1954 	case MDIOCQUERY: {
1955 		struct md_ioctl *mdio = (struct md_ioctl *)addr;
1956 		if (mdio->md_version != MDIOVERSION)
1957 			return (EINVAL);
1958 		MD_IOCTL2REQ(mdio, &mdr);
1959 		mdr.md_file = mdio->md_file;
1960 		mdr.md_file_seg = UIO_USERSPACE;
		/*
		 * If the file name is adjacent to the md_ioctl structure,
		 * it is in kernel space.
		 */
1962 		if ((void *)mdio->md_file == (void *)(mdio + 1))
1963 			mdr.md_file_seg = UIO_SYSSPACE;
1964 		mdr.md_label = mdio->md_label;
1965 		break;
1966 	}
1967 #ifdef COMPAT_FREEBSD32
1968 	case MDIOCATTACH_32:
1969 	case MDIOCDETACH_32:
1970 	case MDIOCRESIZE_32:
1971 	case MDIOCQUERY_32: {
1972 		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
1973 		if (mdio->md_version != MDIOVERSION)
1974 			return (EINVAL);
1975 		MD_IOCTL2REQ(mdio, &mdr);
1976 		mdr.md_file = (void *)(uintptr_t)mdio->md_file;
1977 		mdr.md_file_seg = UIO_USERSPACE;
1978 		mdr.md_label = (void *)(uintptr_t)mdio->md_label;
1979 		break;
1980 	}
1981 #endif
1982 	default:
1983 		/* Fall through to handler switch. */
1984 		break;
1985 	}
1986 
1987 	error = 0;
1988 	switch (cmd) {
1989 	case MDIOCATTACH:
1990 #ifdef COMPAT_FREEBSD32
1991 	case MDIOCATTACH_32:
1992 #endif
1993 		error = kern_mdattach(td, &mdr);
1994 		break;
1995 	case MDIOCDETACH:
1996 #ifdef COMPAT_FREEBSD32
1997 	case MDIOCDETACH_32:
1998 #endif
1999 		error = kern_mddetach(td, &mdr);
2000 		break;
2001 	case MDIOCRESIZE:
2002 #ifdef COMPAT_FREEBSD32
2003 	case MDIOCRESIZE_32:
2004 #endif
2005 		error = kern_mdresize(&mdr);
2006 		break;
2007 	case MDIOCQUERY:
2008 #ifdef COMPAT_FREEBSD32
2009 	case MDIOCQUERY_32:
2010 #endif
2011 		error = kern_mdquery(&mdr);
2012 		break;
2013 	default:
2014 		error = ENOIOCTL;
2015 	}
2016 
2017 	switch (cmd) {
2018 	case MDIOCATTACH:
2019 	case MDIOCQUERY: {
2020 		struct md_ioctl *mdio = (struct md_ioctl *)addr;
2021 		MD_REQ2IOCTL(&mdr, mdio);
2022 		break;
2023 	}
2024 #ifdef COMPAT_FREEBSD32
2025 	case MDIOCATTACH_32:
2026 	case MDIOCQUERY_32: {
2027 		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
2028 		MD_REQ2IOCTL(&mdr, mdio);
2029 		break;
2030 	}
2031 #endif
2032 	default:
2033 		/* Other commands do not alter mdr. */
2034 		break;
2035 	}
2036 
2037 	return (error);
2038 }
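
/*
 * Illustrative kernel-side sketch (not part of this file): an in-kernel
 * caller can hand mdctlioctl() a file name with UIO_SYSSPACE semantics by
 * placing the string immediately after the md_ioctl, which satisfies the
 * adjacency test above.  The variables path/td/error and the use of
 * M_TEMP here are assumptions for the example:
 *
 *	struct md_ioctl *mdio;
 *	size_t len;
 *
 *	len = strlen(path) + 1;
 *	mdio = malloc(sizeof(*mdio) + len, M_TEMP, M_WAITOK | M_ZERO);
 *	mdio->md_version = MDIOVERSION;
 *	mdio->md_type = MD_VNODE;
 *	mdio->md_options = MD_AUTOUNIT;
 *	mdio->md_file = (char *)(mdio + 1);	(adjacent: UIO_SYSSPACE)
 *	memcpy(mdio->md_file, path, len);
 *	error = mdctlioctl(status_dev, MDIOCATTACH, (caddr_t)mdio, 0, td);
 *	free(mdio, M_TEMP);
 */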
2039 
2040 static void
2041 md_preloaded(u_char *image, size_t length, const char *name)
2042 {
2043 	struct md_s *sc;
2044 	int error;
2045 
2046 	sc = mdnew(-1, &error, MD_PRELOAD);
2047 	if (sc == NULL)
2048 		return;
2049 	sc->mediasize = length;
2050 	sc->sectorsize = DEV_BSIZE;
2051 	sc->s_preload.pl_ptr = image;
2052 	sc->s_preload.pl_len = length;
2053 	sc->start = mdstart_preload;
2054 	if (name != NULL)
2055 		strlcpy(sc->s_preload.name, name,
2056 		    sizeof(sc->s_preload.name));
2057 #ifdef MD_ROOT
2058 	if (sc->unit == 0) {
2059 #ifndef ROOTDEVNAME
2060 		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
2061 #endif
2062 #ifdef MD_ROOT_READONLY
2063 		sc->flags |= MD_READONLY;
2064 #endif
2065 	}
2066 #endif
2067 	mdinit(sc);
2068 	if (name != NULL) {
2069 		printf("%s%d: Preloaded image <%s> %zu bytes at %p\n",
2070 		    MD_NAME, sc->unit, name, length, image);
2071 	} else {
2072 		printf("%s%d: Embedded image %zu bytes at %p\n",
2073 		    MD_NAME, sc->unit, length, image);
2074 	}
2075 }
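
/*
 * Illustrative loader.conf(5) fragment (the "mfsroot" prefix and the path
 * are examples only): a file preloaded with MODINFO_TYPE "md_image" or
 * "mfs_root" is found by the scan in g_md_init() below and attached here
 * as an MD_PRELOAD unit:
 *
 *	mfsroot_load="YES"
 *	mfsroot_type="mfs_root"
 *	mfsroot_name="/boot/mfsroot"
 */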
2076 
2077 static void
2078 g_md_init(struct g_class *mp __unused)
2079 {
2080 	caddr_t mod;
2081 	u_char *ptr;
2082 	char *name, *type, scratch[40];
2083 	unsigned len;
2084 	int i;
2085 	vm_offset_t paddr;
2086 
2087 	/* figure out log2(NINDIR) */
2088 	for (i = NINDIR, nshift = -1; i; nshift++)
2089 		i >>= 1;
2090 
2091 	mod = NULL;
2092 	sx_init(&md_sx, "MD config lock");
2093 	g_topology_unlock();
2094 	md_uh = new_unrhdr(0, INT_MAX, NULL);
2095 #ifdef MD_ROOT
2096 	if (mfs_root_size != 0) {
2097 		sx_xlock(&md_sx);
2098 #ifdef MD_ROOT_MEM
2099 		md_preloaded(mfs_root, mfs_root_size, NULL);
2100 #else
2101 		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
2102 		    NULL);
2103 #endif
2104 		sx_xunlock(&md_sx);
2105 	}
2106 #endif
2107 	/* XXX: are preload_* static or do they need Giant? */
2108 	while ((mod = preload_search_next_name(mod)) != NULL) {
2109 		name = (char *)preload_search_info(mod, MODINFO_NAME);
2110 		if (name == NULL)
2111 			continue;
2112 		type = (char *)preload_search_info(mod, MODINFO_TYPE);
2113 		if (type == NULL)
2114 			continue;
2115 		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
2116 			continue;
2117 		ptr = preload_fetch_addr(mod);
2118 		len = preload_fetch_size(mod);
2119 		if (ptr != NULL && len != 0) {
2120 			sx_xlock(&md_sx);
2121 			md_preloaded(ptr, len, name);
2122 			sx_xunlock(&md_sx);
2123 		}
2124 	}
2125 
2126 	/*
2127 	 * Load up to 32 pre-loaded disks described by physaddr/len hints.
2128 	 */
2129 	for (i = 0; i < 32; i++) {
2130 		if (resource_long_value("md", i, "physaddr",
2131 			(long *) &paddr) != 0 ||
2132 		    resource_int_value("md", i, "len", &len) != 0)
2133 			break;
2134 		ptr = (u_char *)pmap_map(NULL, paddr, paddr + len, VM_PROT_READ);
2135 		if (ptr != NULL && len != 0) {
2136 			snprintf(scratch, sizeof(scratch),
2137 			    "preload%d 0x%016jx", i, (uintmax_t)paddr);
2138 			sx_xlock(&md_sx);
2139 			md_preloaded(ptr, len, scratch);
2140 			sx_xunlock(&md_sx);
2141 		}
2142 	}
2143 
2144 	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
2145 	    0600, MDCTL_NAME);
2146 	g_topology_lock();
2147 }
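
/*
 * Illustrative device.hints(5) fragment (address and length values are
 * examples only): the resource_long_value()/resource_int_value() calls
 * above read hints of this form and map the named region as a preloaded
 * disk:
 *
 *	hint.md.0.physaddr="0x02000000"
 *	hint.md.0.len="0x400000"
 */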
2148 
2149 static void
2150 g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2151     struct g_consumer *cp __unused, struct g_provider *pp)
2152 {
2153 	struct md_s *mp;
2154 	const char *type;
2155 
2156 	mp = gp->softc;
2157 	if (mp == NULL)
2158 		return;
2159 
2160 	switch (mp->type) {
2161 	case MD_MALLOC:
2162 		type = "malloc";
2163 		break;
2164 	case MD_PRELOAD:
2165 		type = "preload";
2166 		break;
2167 	case MD_VNODE:
2168 		type = "vnode";
2169 		break;
2170 	case MD_SWAP:
2171 		type = "swap";
2172 		break;
2173 	case MD_NULL:
2174 		type = "null";
2175 		break;
2176 	default:
2177 		type = "unknown";
2178 		break;
2179 	}
2180 
2181 	if (pp != NULL) {
2182 		if (indent == NULL) {
2183 			sbuf_printf(sb, " u %d", mp->unit);
2184 			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
2185 			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
2186 			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
2187 			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
2188 			sbuf_printf(sb, " t %s", type);
2189 			if (mp->type == MD_VNODE &&
2190 			    mp->s_vnode.vnode != NULL)
2191 				sbuf_printf(sb, " file %s", mp->s_vnode.file);
2192 			if (mp->type == MD_PRELOAD &&
2193 			    mp->s_preload.name[0] != '\0') {
2194 				sbuf_printf(sb, " file %s",
2195 				    mp->s_preload.name);
2196 			}
2197 			sbuf_printf(sb, " label %s", mp->label);
2198 		} else {
2199 			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
2200 			    mp->unit);
2201 			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
2202 			    indent, (uintmax_t) mp->sectorsize);
2203 			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
2204 			    indent, (uintmax_t) mp->fwheads);
2205 			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
2206 			    indent, (uintmax_t) mp->fwsectors);
2207 			if (mp->ident[0] != '\0') {
2208 				sbuf_printf(sb, "%s<ident>", indent);
2209 				g_conf_printf_escaped(sb, "%s", mp->ident);
2210 				sbuf_printf(sb, "</ident>\n");
2211 			}
2212 			sbuf_printf(sb, "%s<length>%ju</length>\n",
2213 			    indent, (uintmax_t) mp->mediasize);
2214 			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
2215 			    (mp->flags & MD_COMPRESS) == 0 ? "off" : "on");
2216 			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
2217 			    (mp->flags & MD_READONLY) == 0 ? "read-write" :
2218 			    "read-only");
2219 			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2220 			    type);
2221 			if (mp->type == MD_VNODE) {
2222 				if (mp->s_vnode.vnode != NULL) {
2223 					sbuf_printf(sb, "%s<file>", indent);
2224 					g_conf_printf_escaped(sb, "%s",
2225 					    mp->s_vnode.file);
2226 					sbuf_printf(sb, "</file>\n");
2227 				}
2228 				sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
2229 				    (mp->flags & MD_CACHE) == 0 ? "off" : "on");
2230 			}
2231 			if (mp->type == MD_PRELOAD &&
2232 			    mp->s_preload.name[0] != '\0') {
2233 				sbuf_printf(sb, "%s<file>", indent);
2234 				g_conf_printf_escaped(sb, "%s",
2235 				    mp->s_preload.name);
2236 				sbuf_printf(sb, "</file>\n");
2237 			}
2238 			sbuf_printf(sb, "%s<label>", indent);
2239 			g_conf_printf_escaped(sb, "%s", mp->label);
2240 			sbuf_printf(sb, "</label>\n");
2241 		}
2242 	}
2243 }
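
/*
 * For reference, the indented branch above produces a fragment of this
 * shape in the kern.geom.confxml output (values illustrative, shown for
 * a vnode-backed unit without an ident):
 *
 *	<unit>0</unit>
 *	<sectorsize>512</sectorsize>
 *	<fwheads>0</fwheads>
 *	<fwsectors>0</fwsectors>
 *	<length>1073741824</length>
 *	<compression>off</compression>
 *	<access>read-write</access>
 *	<type>vnode</type>
 *	<file>/var/tmp/md0.img</file>
 *	<cache>off</cache>
 *	<label></label>
 */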
2244 
2245 static void
2246 g_md_fini(struct g_class *mp __unused)
2247 {
2248 
2249 	sx_destroy(&md_sx);
2250 	if (status_dev != NULL)
2251 		destroy_dev(status_dev);
2252 	delete_unrhdr(md_uh);
2253 }
2254