/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such fall under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object
 * file.  Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
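
/*
 * An indir describes one node of a radix tree keyed by sector number.
 * As a worked example (assuming 4 KB pages and 64-bit pointers), NINDIR
 * is 4096 / 8 = 512 and nshift is log2(512) = 9, so each tree level
 * consumes 9 bits of the sector number.  A leaf slot holds 0 for an
 * unallocated sector, a value <= 255 for a sector consisting entirely
 * of that byte (the MD_COMPRESS encoding), or a pointer to a
 * sectorsize-sized buffer from the device's UMA zone.
 */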

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
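
/*
 * Worked example, assuming NINDIR == 512: a 1 GB device with 512-byte
 * sectors has 2097152 sectors, so the loop above computes
 * 2097152 -> 4096 -> 8 and stops with layer == 2.  The root node then
 * gets shift 2 * nshift == 18, i.e. a three-level tree.
 */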

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
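
/*
 * Pruning example: when mdstart_malloc() handles BIO_DELETE it calls
 * s_write(..., 0); once the last used slot of a leaf is cleared, the
 * loop above frees the leaf, clears the slot in its parent and walks
 * upward until it reaches a node that is still in use (or the root).
 */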


static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5
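
/*
 * Ops for the md_malloc_move_*() helpers below: ZERO and FILL store
 * zeros or a constant byte into the request pages, READ and WRITE copy
 * a sector between a backing buffer and the request pages, and CMP
 * scans the request data to check whether the whole sector is a single
 * repeated byte, failing with EDOOFUS when it is not.
 */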

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
		first = 0;
		first_read = 0;
		uc = ptr;
		mp1 = *mp;
		ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}
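
/*
 * Note: the sched_pin() in md_malloc_move_ma() is what makes
 * SFB_CPUPRIVATE safe; a CPU-private sf_buf mapping is only valid on
 * the CPU that created it, so the thread must not migrate while the
 * mapping is in use.
 */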

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
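
/*
 * Example of the MD_COMPRESS path above: writing a sector consisting
 * entirely of the byte 0x5a stores the value 0x5a directly in the indir
 * tree (and frees any previously allocated sector buffer), so uniform
 * sectors cost one tree slot instead of a sectorsize allocation.
 */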

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway) the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		bp->bio_resid = len;
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}
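
/*
 * A note on the BIO_DELETE path above: a plain file has no trim
 * primitive, so the delete becomes one VOP_WRITE() whose iovecs all
 * point at the kernel's pre-zeroed zero_region, each covering at most
 * ZERO_REGION_SIZE bytes rounded down to a sector multiple.
 */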

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
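
	/*
	 * Worked example, assuming 4 KB pages: a 1536-byte request at
	 * offset 3584 gives offs == 3584, lastp == 1 and lastend == 1024,
	 * so the loop below touches bytes 3584-4095 of page 0 and bytes
	 * 0-1023 of page 1.
	 */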

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		vm_page_xunbusy(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else if (g_handleattr_int(bp, "MNT::verified", isv))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}
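
/*
 * The error == -1 convention above follows g_handleattr(): when
 * g_handleattr_int() recognizes an attribute it completes the bio
 * itself, so the worker must not call g_io_deliver() on it again.
 */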

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}
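
/*
 * With MD_RESERVE every sector buffer is allocated (zeroed) up front,
 * which is also why MD_RESERVE clears MD_COMPRESS above: the
 * compression logic would immediately start releasing the space that
 * the reservation was meant to pin down.
 */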


static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE) \
	    | ((mdio->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC | MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}
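
/*
 * Worked example for the swap case above, assuming 4 KB pages: growing
 * a device from 1 GB to 2 GB moves oldpages == 262144 to
 * newpages == 524288; the extra 262144 pages are charged against the
 * owner's swap accounting and, with MD_RESERVE, also pre-allocated in
 * the swap pager.
 */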

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	if ((mdio->md_options & MD_VERIFY) != 0)
		return (EINVAL);
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl. Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE ||
		    (sc->type == MD_PRELOAD && mdio->md_file != NULL))
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	};
}
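
/*
 * For reference, these ioctls are normally driven by mdconfig(8);
 * illustrative invocations (not from this file):
 *
 *	mdconfig -a -t swap -s 1g		# MDIOCATTACH, MD_SWAP
 *	mdconfig -a -t vnode -f disk.img	# MDIOCATTACH, MD_VNODE
 *	mdconfig -r -u 0 -s 2g			# MDIOCRESIZE
 *	mdconfig -d -u 0			# MDIOCDETACH
 *	mdconfig -l				# list configured devices
 */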

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	if (name != NULL)
		strlcpy(sc->file, name, sizeof(sc->file));
#if defined(MD_ROOT) && !defined(ROOTDEVNAME)
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}
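
/*
 * Preloaded images normally arrive via loader(8); a loader.conf(5)
 * fragment along these lines (names illustrative) makes one available:
 *
 *	mfsroot_load="YES"
 *	mfsroot_type="mfs_root"
 *	mfsroot_name="/boot/mfsroot"
 *
 * g_md_init() below walks the preload metadata and hands every module
 * of type "md_image" or "mfs_root" to md_preloaded().
 */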

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
				sbuf_printf(sb, "%s<file>", indent);
				g_conf_printf_escaped(sb, "%s", mp->file);
				sbuf_printf(sb, "</file>\n");
			}
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}
1891