xref: /freebsd/sys/dev/md/md.c (revision 1f1928092d351523c25cfe277301b4f401d43e26)
1098ca2bdSWarner Losh /*-
200a6a3c6SPoul-Henning Kamp  * ----------------------------------------------------------------------------
300a6a3c6SPoul-Henning Kamp  * "THE BEER-WARE LICENSE" (Revision 42):
400a6a3c6SPoul-Henning Kamp  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
500a6a3c6SPoul-Henning Kamp  * can do whatever you want with this stuff. If we meet some day, and you think
600a6a3c6SPoul-Henning Kamp  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
700a6a3c6SPoul-Henning Kamp  * ----------------------------------------------------------------------------
800a6a3c6SPoul-Henning Kamp  *
900a6a3c6SPoul-Henning Kamp  * $FreeBSD$
1000a6a3c6SPoul-Henning Kamp  *
1100a6a3c6SPoul-Henning Kamp  */
1200a6a3c6SPoul-Henning Kamp 
13098ca2bdSWarner Losh /*-
14637f671aSPoul-Henning Kamp  * The following functions are based in the vn(4) driver: mdstart_swap(),
15637f671aSPoul-Henning Kamp  * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
16637f671aSPoul-Henning Kamp  * and as such under the following copyright:
17637f671aSPoul-Henning Kamp  *
18637f671aSPoul-Henning Kamp  * Copyright (c) 1988 University of Utah.
19637f671aSPoul-Henning Kamp  * Copyright (c) 1990, 1993
20637f671aSPoul-Henning Kamp  *	The Regents of the University of California.  All rights reserved.
21637f671aSPoul-Henning Kamp  *
22ed010cdfSWarner Losh  * This code is derived from software contributed to Berkeley by
23ed010cdfSWarner Losh  * the Systems Programming Group of the University of Utah Computer
24ed010cdfSWarner Losh  * Science Department.
25ed010cdfSWarner Losh  *
26637f671aSPoul-Henning Kamp  * Redistribution and use in source and binary forms, with or without
27637f671aSPoul-Henning Kamp  * modification, are permitted provided that the following conditions
28637f671aSPoul-Henning Kamp  * are met:
29637f671aSPoul-Henning Kamp  * 1. Redistributions of source code must retain the above copyright
30637f671aSPoul-Henning Kamp  *    notice, this list of conditions and the following disclaimer.
31637f671aSPoul-Henning Kamp  * 2. Redistributions in binary form must reproduce the above copyright
32637f671aSPoul-Henning Kamp  *    notice, this list of conditions and the following disclaimer in the
33637f671aSPoul-Henning Kamp  *    documentation and/or other materials provided with the distribution.
34637f671aSPoul-Henning Kamp  * 4. Neither the name of the University nor the names of its contributors
35637f671aSPoul-Henning Kamp  *    may be used to endorse or promote products derived from this software
36637f671aSPoul-Henning Kamp  *    without specific prior written permission.
37637f671aSPoul-Henning Kamp  *
38637f671aSPoul-Henning Kamp  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39637f671aSPoul-Henning Kamp  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40637f671aSPoul-Henning Kamp  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41637f671aSPoul-Henning Kamp  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42637f671aSPoul-Henning Kamp  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43637f671aSPoul-Henning Kamp  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44637f671aSPoul-Henning Kamp  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45637f671aSPoul-Henning Kamp  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46637f671aSPoul-Henning Kamp  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47637f671aSPoul-Henning Kamp  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48637f671aSPoul-Henning Kamp  * SUCH DAMAGE.
49637f671aSPoul-Henning Kamp  *
50637f671aSPoul-Henning Kamp  * from: Utah Hdr: vn.c 1.13 94/04/02
51637f671aSPoul-Henning Kamp  *
52637f671aSPoul-Henning Kamp  *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
53637f671aSPoul-Henning Kamp  * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
54637f671aSPoul-Henning Kamp  */
55637f671aSPoul-Henning Kamp 
566f4f00f1SPoul-Henning Kamp #include "opt_geom.h"
573f54a085SPoul-Henning Kamp #include "opt_md.h"
5871e4fff8SPoul-Henning Kamp 
5900a6a3c6SPoul-Henning Kamp #include <sys/param.h>
6000a6a3c6SPoul-Henning Kamp #include <sys/systm.h>
619626b608SPoul-Henning Kamp #include <sys/bio.h>
6200a6a3c6SPoul-Henning Kamp #include <sys/conf.h>
63a03be42dSMaxim Sobolev #include <sys/devicestat.h>
648f8def9eSPoul-Henning Kamp #include <sys/fcntl.h>
65fb919e4dSMark Murray #include <sys/kernel.h>
665c97ca54SIan Dowse #include <sys/kthread.h>
6706d425f9SEd Schouten #include <sys/limits.h>
68fb919e4dSMark Murray #include <sys/linker.h>
69fb919e4dSMark Murray #include <sys/lock.h>
70fb919e4dSMark Murray #include <sys/malloc.h>
71fb919e4dSMark Murray #include <sys/mdioctl.h>
72a08d2e7fSJohn Baldwin #include <sys/mount.h>
739dceb26bSJohn Baldwin #include <sys/mutex.h>
749b00ca19SPoul-Henning Kamp #include <sys/sx.h>
75fb919e4dSMark Murray #include <sys/namei.h>
768f8def9eSPoul-Henning Kamp #include <sys/proc.h>
77fb919e4dSMark Murray #include <sys/queue.h>
78657bd8b1SAndrey V. Elsukov #include <sys/sbuf.h>
7963710c4dSJohn Baldwin #include <sys/sched.h>
807cd53fddSAlan Cox #include <sys/sf_buf.h>
81fb919e4dSMark Murray #include <sys/sysctl.h>
82fb919e4dSMark Murray #include <sys/vnode.h>
83fb919e4dSMark Murray 
846f4f00f1SPoul-Henning Kamp #include <geom/geom.h>
856f4f00f1SPoul-Henning Kamp 
868f8def9eSPoul-Henning Kamp #include <vm/vm.h>
878f8def9eSPoul-Henning Kamp #include <vm/vm_object.h>
888f8def9eSPoul-Henning Kamp #include <vm/vm_page.h>
898f8def9eSPoul-Henning Kamp #include <vm/vm_pager.h>
908f8def9eSPoul-Henning Kamp #include <vm/swap_pager.h>
91f43b2bacSPoul-Henning Kamp #include <vm/uma.h>
923f54a085SPoul-Henning Kamp 
93cfb00e5aSMatthew D Fleming #include <machine/vmparam.h>
94cfb00e5aSMatthew D Fleming 
#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

/* Default device size in sectors (used by creation paths outside this view). */
#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

/* debug.mddebug: > 1 enables per-sector tracing in s_read()/s_write(). */
static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");
/* vm.md_malloc_wait: if set, malloc-backed devices sleep for memory
 * (M_WAITOK) instead of failing allocations with M_NOWAIT. */
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif
12771e4fff8SPoul-Henning Kamp 
/* GEOM class method implementations, defined below/elsewhere in this file. */
static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = 0;
static struct sx md_sx;			/* Serializes configuration operations. */
static struct unrhdr *md_uh;		/* Unit-number allocator (unr(9)). */

static d_ioctl_t mdctlioctl;

/* Character-device switch for the md control device. */
static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

/* GEOM class glue: registers this driver's methods with GEOM. */
struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


/* All configured md(4) devices, linked through md_s.list. */
static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

/*
 * Malloc-backed devices store sector slots in a radix tree of "indir"
 * nodes.  Each node holds NINDIR slots (one page worth of uintptr_t
 * entries); nshift is the per-level index width in bits — presumably
 * initialized at class init time (initializer not in this view).
 */
#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

/* One level of the sector-lookup radix tree. */
struct indir {
	uintptr_t	*array;		/* NINDIR child/leaf slots */
	u_int		total;		/* slot capacity (always NINDIR) */
	u_int		used;		/* non-zero slots; drives pruning */
	u_int		shift;		/* bit position of this level's index */
};
173c6517568SPoul-Henning Kamp 
/*
 * Per-device softc.  A device is backed by exactly one of the md_types
 * (malloc, preload, vnode, swap); only the matching group of fields at
 * the bottom is meaningful for a given device.
 */
struct md_s {
	int unit;			/* Unit number (mdN). */
	LIST_ENTRY(md_s) list;		/* Linkage on md_softc_list. */
	struct bio_queue_head bio_queue; /* Requests queued by g_md_start(). */
	struct mtx queue_mtx;		/* Protects bio_queue. */
	struct cdev *dev;
	enum md_types type;		/* Which backing store this device uses. */
	off_t mediasize;		/* Device size in bytes. */
	unsigned sectorsize;		/* Sector size in bytes. */
	unsigned opencount;		/* Open state, tracked by g_md_access(). */
	unsigned fwheads;		/* Reported geometry — presumably set via
					 * ioctl; setters not in this view. */
	unsigned fwsectors;
	unsigned flags;			/* MD_* flags (MD_READONLY, MD_COMPRESS,
					 * MD_ASYNC, MD_SHUTDOWN, MD_EXITING). */
	char name[20];
	struct proc *procp;		/* Worker servicing bio_queue (woken by
					 * g_md_start(); see MD_SHUTDOWN). */
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp); /* Type-specific I/O handler. */
	struct devstat *devstat;	/* devstat(9) accounting state. */

	/* MD_MALLOC related fields */
	struct indir *indir;		/* Root of the sector radix tree. */
	uma_zone_t uma;			/* Zone for sector-sized buffers. */

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;			/* Base address of preloaded image. */
	size_t pl_len;			/* Image length in bytes. */

	/* MD_VNODE related fields */
	struct vnode *vnode;		/* Backing file's vnode. */
	char file[PATH_MAX];		/* Path of the backing file. */
	struct ucred *cred;		/* Credentials used for file I/O. */

	/* MD_SWAP related fields */
	vm_object_t object;		/* Backing VM object — presumably
					 * swap-backed; creation not in view. */
};
21000a6a3c6SPoul-Henning Kamp 
211c6517568SPoul-Henning Kamp static struct indir *
2128b149b51SJohn Baldwin new_indir(u_int shift)
213c6517568SPoul-Henning Kamp {
214c6517568SPoul-Henning Kamp 	struct indir *ip;
215c6517568SPoul-Henning Kamp 
216c44d423eSKonstantin Belousov 	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
217c44d423eSKonstantin Belousov 	    | M_ZERO);
218c6517568SPoul-Henning Kamp 	if (ip == NULL)
219c6517568SPoul-Henning Kamp 		return (NULL);
220c6517568SPoul-Henning Kamp 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
221c44d423eSKonstantin Belousov 	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
222c6517568SPoul-Henning Kamp 	if (ip->array == NULL) {
223c6517568SPoul-Henning Kamp 		free(ip, M_MD);
224c6517568SPoul-Henning Kamp 		return (NULL);
225c6517568SPoul-Henning Kamp 	}
226c6517568SPoul-Henning Kamp 	ip->total = NINDIR;
227c6517568SPoul-Henning Kamp 	ip->shift = shift;
228c6517568SPoul-Henning Kamp 	return (ip);
229c6517568SPoul-Henning Kamp }
230c6517568SPoul-Henning Kamp 
231c6517568SPoul-Henning Kamp static void
232c6517568SPoul-Henning Kamp del_indir(struct indir *ip)
233c6517568SPoul-Henning Kamp {
234c6517568SPoul-Henning Kamp 
235f43b2bacSPoul-Henning Kamp 	free(ip->array, M_MDSECT);
236c6517568SPoul-Henning Kamp 	free(ip, M_MD);
237c6517568SPoul-Henning Kamp }
238c6517568SPoul-Henning Kamp 
239f43b2bacSPoul-Henning Kamp static void
240f43b2bacSPoul-Henning Kamp destroy_indir(struct md_s *sc, struct indir *ip)
241f43b2bacSPoul-Henning Kamp {
242f43b2bacSPoul-Henning Kamp 	int i;
243f43b2bacSPoul-Henning Kamp 
244f43b2bacSPoul-Henning Kamp 	for (i = 0; i < NINDIR; i++) {
245f43b2bacSPoul-Henning Kamp 		if (!ip->array[i])
246f43b2bacSPoul-Henning Kamp 			continue;
247f43b2bacSPoul-Henning Kamp 		if (ip->shift)
248f43b2bacSPoul-Henning Kamp 			destroy_indir(sc, (struct indir*)(ip->array[i]));
249f43b2bacSPoul-Henning Kamp 		else if (ip->array[i] > 255)
250f43b2bacSPoul-Henning Kamp 			uma_zfree(sc->uma, (void *)(ip->array[i]));
251f43b2bacSPoul-Henning Kamp 	}
252f43b2bacSPoul-Henning Kamp 	del_indir(ip);
253f43b2bacSPoul-Henning Kamp }
254f43b2bacSPoul-Henning Kamp 
255c6517568SPoul-Henning Kamp /*
2566c3cd0e2SMaxim Konovalov  * This function does the math and allocates the top level "indir" structure
257c6517568SPoul-Henning Kamp  * for a device of "size" sectors.
258c6517568SPoul-Henning Kamp  */
259c6517568SPoul-Henning Kamp 
260c6517568SPoul-Henning Kamp static struct indir *
261c6517568SPoul-Henning Kamp dimension(off_t size)
262c6517568SPoul-Henning Kamp {
263c6517568SPoul-Henning Kamp 	off_t rcnt;
264c6517568SPoul-Henning Kamp 	struct indir *ip;
265d12fc952SKonstantin Belousov 	int layer;
266c6517568SPoul-Henning Kamp 
267c6517568SPoul-Henning Kamp 	rcnt = size;
268c6517568SPoul-Henning Kamp 	layer = 0;
269c6517568SPoul-Henning Kamp 	while (rcnt > NINDIR) {
270c6517568SPoul-Henning Kamp 		rcnt /= NINDIR;
271c6517568SPoul-Henning Kamp 		layer++;
272c6517568SPoul-Henning Kamp 	}
273c6517568SPoul-Henning Kamp 
274c6517568SPoul-Henning Kamp 	/*
275c6517568SPoul-Henning Kamp 	 * XXX: the top layer is probably not fully populated, so we allocate
27683e13864SPoul-Henning Kamp 	 * too much space for ip->array in here.
277c6517568SPoul-Henning Kamp 	 */
27883e13864SPoul-Henning Kamp 	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
27983e13864SPoul-Henning Kamp 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
28083e13864SPoul-Henning Kamp 	    M_MDSECT, M_WAITOK | M_ZERO);
28183e13864SPoul-Henning Kamp 	ip->total = NINDIR;
28283e13864SPoul-Henning Kamp 	ip->shift = layer * nshift;
283c6517568SPoul-Henning Kamp 	return (ip);
284c6517568SPoul-Henning Kamp }
285c6517568SPoul-Henning Kamp 
286c6517568SPoul-Henning Kamp /*
287c6517568SPoul-Henning Kamp  * Read a given sector
288c6517568SPoul-Henning Kamp  */
289c6517568SPoul-Henning Kamp 
290c6517568SPoul-Henning Kamp static uintptr_t
291c6517568SPoul-Henning Kamp s_read(struct indir *ip, off_t offset)
292c6517568SPoul-Henning Kamp {
293c6517568SPoul-Henning Kamp 	struct indir *cip;
294c6517568SPoul-Henning Kamp 	int idx;
295c6517568SPoul-Henning Kamp 	uintptr_t up;
296c6517568SPoul-Henning Kamp 
297c6517568SPoul-Henning Kamp 	if (md_debug > 1)
2986569d6f3SMaxime Henrion 		printf("s_read(%jd)\n", (intmax_t)offset);
299c6517568SPoul-Henning Kamp 	up = 0;
300c6517568SPoul-Henning Kamp 	for (cip = ip; cip != NULL;) {
301c6517568SPoul-Henning Kamp 		if (cip->shift) {
302c6517568SPoul-Henning Kamp 			idx = (offset >> cip->shift) & NMASK;
303c6517568SPoul-Henning Kamp 			up = cip->array[idx];
304c6517568SPoul-Henning Kamp 			cip = (struct indir *)up;
305c6517568SPoul-Henning Kamp 			continue;
306c6517568SPoul-Henning Kamp 		}
307c6517568SPoul-Henning Kamp 		idx = offset & NMASK;
308c6517568SPoul-Henning Kamp 		return (cip->array[idx]);
309c6517568SPoul-Henning Kamp 	}
310c6517568SPoul-Henning Kamp 	return (0);
311c6517568SPoul-Henning Kamp }
312c6517568SPoul-Henning Kamp 
/*
 * Write a given sector, prune the tree if the value is 0
 *
 * Stores "ptr" in the leaf slot for "offset", allocating interior
 * nodes on the way down as needed.  When a store empties a leaf node
 * (used drops to 0), the path recorded in lip[] is walked back up and
 * empty nodes are released, so a fully-zeroed region costs no memory.
 * Returns 0 on success or ENOSPC if a node allocation fails.
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	/*
	 * Descend to the leaf, recording the root-to-leaf path in lip[]
	 * so empty nodes can be pruned afterwards.
	 * NOTE(review): lip[10] bounds the depth; dimension() decides the
	 * actual layer count — confirm it can never exceed 10.
	 */
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	/* Leaf still has entries, or it is the root: nothing to prune. */
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	/* Walk back up, freeing nodes that became empty, root excepted. */
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
373c6517568SPoul-Henning Kamp 
3746f4f00f1SPoul-Henning Kamp 
3756f4f00f1SPoul-Henning Kamp static int
3766f4f00f1SPoul-Henning Kamp g_md_access(struct g_provider *pp, int r, int w, int e)
3776f4f00f1SPoul-Henning Kamp {
3786f4f00f1SPoul-Henning Kamp 	struct md_s *sc;
3796f4f00f1SPoul-Henning Kamp 
3806f4f00f1SPoul-Henning Kamp 	sc = pp->geom->softc;
38141c8b468SEdward Tomasz Napierala 	if (sc == NULL) {
38241c8b468SEdward Tomasz Napierala 		if (r <= 0 && w <= 0 && e <= 0)
38341c8b468SEdward Tomasz Napierala 			return (0);
3846b60a2cdSPoul-Henning Kamp 		return (ENXIO);
38541c8b468SEdward Tomasz Napierala 	}
3866f4f00f1SPoul-Henning Kamp 	r += pp->acr;
3876f4f00f1SPoul-Henning Kamp 	w += pp->acw;
3886f4f00f1SPoul-Henning Kamp 	e += pp->ace;
38986776891SChristian S.J. Peron 	if ((sc->flags & MD_READONLY) != 0 && w > 0)
39086776891SChristian S.J. Peron 		return (EROFS);
3916f4f00f1SPoul-Henning Kamp 	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
3926f4f00f1SPoul-Henning Kamp 		sc->opencount = 1;
3936f4f00f1SPoul-Henning Kamp 	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
3946f4f00f1SPoul-Henning Kamp 		sc->opencount = 0;
3956f4f00f1SPoul-Henning Kamp 	}
3966f4f00f1SPoul-Henning Kamp 	return (0);
3976f4f00f1SPoul-Henning Kamp }
3986f4f00f1SPoul-Henning Kamp 
3996f4f00f1SPoul-Henning Kamp static void
4006f4f00f1SPoul-Henning Kamp g_md_start(struct bio *bp)
4016f4f00f1SPoul-Henning Kamp {
4026f4f00f1SPoul-Henning Kamp 	struct md_s *sc;
4036f4f00f1SPoul-Henning Kamp 
4046f4f00f1SPoul-Henning Kamp 	sc = bp->bio_to->geom->softc;
405a03be42dSMaxim Sobolev 	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
406a03be42dSMaxim Sobolev 		devstat_start_transaction_bio(sc->devstat, bp);
4070f8500a5SPoul-Henning Kamp 	mtx_lock(&sc->queue_mtx);
408891619a6SPoul-Henning Kamp 	bioq_disksort(&sc->bio_queue, bp);
4094b07ede4SPawel Jakub Dawidek 	mtx_unlock(&sc->queue_mtx);
410e4cdd0d4SPawel Jakub Dawidek 	wakeup(sc);
4116f4f00f1SPoul-Henning Kamp }
4126f4f00f1SPoul-Henning Kamp 
/*
 * I/O handler for malloc-backed devices.  A sector's slot in the indir
 * tree encodes three states:
 *   0        - never written; reads as zeroes
 *   1..255   - every byte of the sector equals that value (compressed)
 *   > 255    - pointer to a sector-sized buffer from sc->uma
 * Returns 0 or an errno; EOPNOTSUPP for unsupported bio commands,
 * ENOSPC when a buffer or tree node cannot be allocated.
 */
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			/* Drop the slot; the old buffer is freed below. */
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			/* Clear osp so the buffer is NOT freed below. */
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				/* Is the whole sector one repeated byte? */
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				/* Uniform sector: store the byte value. */
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					/* No buffer yet: allocate one. */
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					/* Overwrite the existing buffer. */
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		/* Release the old buffer once the slot no longer needs it. */
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
49100a6a3c6SPoul-Henning Kamp 
492b4a4f93cSPoul-Henning Kamp static int
493b4a4f93cSPoul-Henning Kamp mdstart_preload(struct md_s *sc, struct bio *bp)
49471e4fff8SPoul-Henning Kamp {
49571e4fff8SPoul-Henning Kamp 
496a8a58d03SPawel Jakub Dawidek 	switch (bp->bio_cmd) {
497a8a58d03SPawel Jakub Dawidek 	case BIO_READ:
498a8a58d03SPawel Jakub Dawidek 		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
499a8a58d03SPawel Jakub Dawidek 		    bp->bio_length);
500dbb95048SMarcel Moolenaar 		cpu_flush_dcache(bp->bio_data, bp->bio_length);
501a8a58d03SPawel Jakub Dawidek 		break;
502a8a58d03SPawel Jakub Dawidek 	case BIO_WRITE:
503a8a58d03SPawel Jakub Dawidek 		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
504a8a58d03SPawel Jakub Dawidek 		    bp->bio_length);
505a8a58d03SPawel Jakub Dawidek 		break;
50671e4fff8SPoul-Henning Kamp 	}
5078177437dSPoul-Henning Kamp 	bp->bio_resid = 0;
508b4a4f93cSPoul-Henning Kamp 	return (0);
50971e4fff8SPoul-Henning Kamp }
51071e4fff8SPoul-Henning Kamp 
/*
 * I/O handler for vnode-backed devices: translate the bio into
 * VOP_READ/VOP_WRITE/VOP_FSYNC on the backing vnode under the proper
 * vnode and mount write locks.  BIO_DELETE is emulated by writing
 * zeroes over the range from the shared zero_region.
 */
static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	/* BIO_FLUSH maps directly onto fsync of the backing vnode. */
	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		/* Largest sector-aligned chunk of the shared zero page. */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		/* VOP_WRITE advances uio_offset; loop until range covered. */
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	/* Plain read or write: one uio covering the whole transfer. */
	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		/* Writes additionally need mount-level write permission. */
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}
6298f8def9eSPoul-Henning Kamp 
630b4a4f93cSPoul-Henning Kamp static int
631b4a4f93cSPoul-Henning Kamp mdstart_swap(struct md_s *sc, struct bio *bp)
6328f8def9eSPoul-Henning Kamp {
6337cd53fddSAlan Cox 	struct sf_buf *sf;
6346ab0a0aeSPawel Jakub Dawidek 	int rv, offs, len, lastend;
6356ab0a0aeSPawel Jakub Dawidek 	vm_pindex_t i, lastp;
6368e28326aSPoul-Henning Kamp 	vm_page_t m;
6378e28326aSPoul-Henning Kamp 	u_char *p;
6388f8def9eSPoul-Henning Kamp 
6395541f25eSPawel Jakub Dawidek 	switch (bp->bio_cmd) {
6405541f25eSPawel Jakub Dawidek 	case BIO_READ:
6415541f25eSPawel Jakub Dawidek 	case BIO_WRITE:
6425541f25eSPawel Jakub Dawidek 	case BIO_DELETE:
6435541f25eSPawel Jakub Dawidek 		break;
6445541f25eSPawel Jakub Dawidek 	default:
6455541f25eSPawel Jakub Dawidek 		return (EOPNOTSUPP);
6465541f25eSPawel Jakub Dawidek 	}
6475541f25eSPawel Jakub Dawidek 
6488e28326aSPoul-Henning Kamp 	p = bp->bio_data;
649e07113d6SColin Percival 
650e07113d6SColin Percival 	/*
6516c3cd0e2SMaxim Konovalov 	 * offs is the offset at which to start operating on the
652e07113d6SColin Percival 	 * next (ie, first) page.  lastp is the last page on
653e07113d6SColin Percival 	 * which we're going to operate.  lastend is the ending
654e07113d6SColin Percival 	 * position within that last page (ie, PAGE_SIZE if
655e07113d6SColin Percival 	 * we're operating on complete aligned pages).
656e07113d6SColin Percival 	 */
657e07113d6SColin Percival 	offs = bp->bio_offset % PAGE_SIZE;
658e07113d6SColin Percival 	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
659e07113d6SColin Percival 	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
660e07113d6SColin Percival 
661812851b6SBrian Feldman 	rv = VM_PAGER_OK;
6628e28326aSPoul-Henning Kamp 	VM_OBJECT_LOCK(sc->object);
6638e28326aSPoul-Henning Kamp 	vm_object_pip_add(sc->object, 1);
664e07113d6SColin Percival 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
665e07113d6SColin Percival 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
666e07113d6SColin Percival 
667e07113d6SColin Percival 		m = vm_page_grab(sc->object, i,
6688e28326aSPoul-Henning Kamp 		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
6697cd53fddSAlan Cox 		VM_OBJECT_UNLOCK(sc->object);
670e340fc60SAlan Cox 		sched_pin();
671e340fc60SAlan Cox 		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
6727cd53fddSAlan Cox 		VM_OBJECT_LOCK(sc->object);
6738e28326aSPoul-Henning Kamp 		if (bp->bio_cmd == BIO_READ) {
67407be617fSAlan Cox 			if (m->valid != VM_PAGE_BITS_ALL)
67507be617fSAlan Cox 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
676812851b6SBrian Feldman 			if (rv == VM_PAGER_ERROR) {
677812851b6SBrian Feldman 				sf_buf_free(sf);
678e340fc60SAlan Cox 				sched_unpin();
679812851b6SBrian Feldman 				vm_page_wakeup(m);
680812851b6SBrian Feldman 				break;
681812851b6SBrian Feldman 			}
6827cd53fddSAlan Cox 			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
683dbb95048SMarcel Moolenaar 			cpu_flush_dcache(p, len);
6848e28326aSPoul-Henning Kamp 		} else if (bp->bio_cmd == BIO_WRITE) {
68507be617fSAlan Cox 			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
68607be617fSAlan Cox 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
687812851b6SBrian Feldman 			if (rv == VM_PAGER_ERROR) {
688812851b6SBrian Feldman 				sf_buf_free(sf);
689e340fc60SAlan Cox 				sched_unpin();
690812851b6SBrian Feldman 				vm_page_wakeup(m);
691812851b6SBrian Feldman 				break;
692812851b6SBrian Feldman 			}
6937cd53fddSAlan Cox 			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
6948e28326aSPoul-Henning Kamp 			m->valid = VM_PAGE_BITS_ALL;
6958e28326aSPoul-Henning Kamp 		} else if (bp->bio_cmd == BIO_DELETE) {
69607be617fSAlan Cox 			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
69707be617fSAlan Cox 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
698812851b6SBrian Feldman 			if (rv == VM_PAGER_ERROR) {
699812851b6SBrian Feldman 				sf_buf_free(sf);
700e340fc60SAlan Cox 				sched_unpin();
701812851b6SBrian Feldman 				vm_page_wakeup(m);
702812851b6SBrian Feldman 				break;
703812851b6SBrian Feldman 			}
7044a13a769SKonstantin Belousov 			if (len != PAGE_SIZE) {
7057cd53fddSAlan Cox 				bzero((void *)(sf_buf_kva(sf) + offs), len);
7064a13a769SKonstantin Belousov 				vm_page_clear_dirty(m, offs, len);
7078e28326aSPoul-Henning Kamp 				m->valid = VM_PAGE_BITS_ALL;
7084a13a769SKonstantin Belousov 			} else
7094a13a769SKonstantin Belousov 				vm_pager_page_unswapped(m);
7108e28326aSPoul-Henning Kamp 		}
7117cd53fddSAlan Cox 		sf_buf_free(sf);
712e340fc60SAlan Cox 		sched_unpin();
7138e28326aSPoul-Henning Kamp 		vm_page_wakeup(m);
714fc0c3802SKonstantin Belousov 		vm_page_lock(m);
7154a13a769SKonstantin Belousov 		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
7164a13a769SKonstantin Belousov 			vm_page_free(m);
7174a13a769SKonstantin Belousov 		else
7188e28326aSPoul-Henning Kamp 			vm_page_activate(m);
719ecd5dd95SAlan Cox 		vm_page_unlock(m);
72007be617fSAlan Cox 		if (bp->bio_cmd == BIO_WRITE)
7218e28326aSPoul-Henning Kamp 			vm_page_dirty(m);
722e07113d6SColin Percival 
723e07113d6SColin Percival 		/* Actions on further pages start at offset 0 */
724e07113d6SColin Percival 		p += PAGE_SIZE - offs;
725e07113d6SColin Percival 		offs = 0;
7268e28326aSPoul-Henning Kamp #if 0
727e07113d6SColin Percival if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
7288e28326aSPoul-Henning Kamp printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
7298e28326aSPoul-Henning Kamp     m->wire_count, m->busy,
730e07113d6SColin Percival     m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
7318e28326aSPoul-Henning Kamp #endif
7328e28326aSPoul-Henning Kamp 	}
7338e28326aSPoul-Henning Kamp 	vm_object_pip_subtract(sc->object, 1);
7348e28326aSPoul-Henning Kamp 	VM_OBJECT_UNLOCK(sc->object);
735812851b6SBrian Feldman 	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
7368e28326aSPoul-Henning Kamp }
7378f8def9eSPoul-Henning Kamp 
7388f8def9eSPoul-Henning Kamp static void
7395c97ca54SIan Dowse md_kthread(void *arg)
7405c97ca54SIan Dowse {
7415c97ca54SIan Dowse 	struct md_s *sc;
7425c97ca54SIan Dowse 	struct bio *bp;
743a08d2e7fSJohn Baldwin 	int error;
7445c97ca54SIan Dowse 
7455c97ca54SIan Dowse 	sc = arg;
746982d11f8SJeff Roberson 	thread_lock(curthread);
74763710c4dSJohn Baldwin 	sched_prio(curthread, PRIBIO);
748982d11f8SJeff Roberson 	thread_unlock(curthread);
7493b7b5496SKonstantin Belousov 	if (sc->type == MD_VNODE)
7503b7b5496SKonstantin Belousov 		curthread->td_pflags |= TDP_NORUNNINGBUF;
7515c97ca54SIan Dowse 
752b4a4f93cSPoul-Henning Kamp 	for (;;) {
753a08d2e7fSJohn Baldwin 		mtx_lock(&sc->queue_mtx);
7545c97ca54SIan Dowse 		if (sc->flags & MD_SHUTDOWN) {
755a08d2e7fSJohn Baldwin 			sc->flags |= MD_EXITING;
756a08d2e7fSJohn Baldwin 			mtx_unlock(&sc->queue_mtx);
7573745c395SJulian Elischer 			kproc_exit(0);
7585c97ca54SIan Dowse 		}
7599b00ca19SPoul-Henning Kamp 		bp = bioq_takefirst(&sc->bio_queue);
7609b00ca19SPoul-Henning Kamp 		if (!bp) {
7610f8500a5SPoul-Henning Kamp 			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
7625c97ca54SIan Dowse 			continue;
7635c97ca54SIan Dowse 		}
7640f8500a5SPoul-Henning Kamp 		mtx_unlock(&sc->queue_mtx);
7654e8bfe14SPoul-Henning Kamp 		if (bp->bio_cmd == BIO_GETATTR) {
766d91e813cSKonstantin Belousov 			if ((sc->fwsectors && sc->fwheads &&
7674e8bfe14SPoul-Henning Kamp 			    (g_handleattr_int(bp, "GEOM::fwsectors",
7684e8bfe14SPoul-Henning Kamp 			    sc->fwsectors) ||
7694e8bfe14SPoul-Henning Kamp 			    g_handleattr_int(bp, "GEOM::fwheads",
770d91e813cSKonstantin Belousov 			    sc->fwheads))) ||
771d91e813cSKonstantin Belousov 			    g_handleattr_int(bp, "GEOM::candelete", 1))
7724e8bfe14SPoul-Henning Kamp 				error = -1;
7734e8bfe14SPoul-Henning Kamp 			else
7744e8bfe14SPoul-Henning Kamp 				error = EOPNOTSUPP;
7754e8bfe14SPoul-Henning Kamp 		} else {
7769b00ca19SPoul-Henning Kamp 			error = sc->start(sc, bp);
7774e8bfe14SPoul-Henning Kamp 		}
778b4a4f93cSPoul-Henning Kamp 
7796f4f00f1SPoul-Henning Kamp 		if (error != -1) {
7806f4f00f1SPoul-Henning Kamp 			bp->bio_completed = bp->bio_length;
781a03be42dSMaxim Sobolev 			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
782a03be42dSMaxim Sobolev 				devstat_end_transaction_bio(sc->devstat, bp);
78396410b95SKonstantin Belousov 			g_io_deliver(bp, error);
784b4a4f93cSPoul-Henning Kamp 		}
7858f8def9eSPoul-Henning Kamp 	}
78626d48b40SPoul-Henning Kamp }
7878f8def9eSPoul-Henning Kamp 
7888f8def9eSPoul-Henning Kamp static struct md_s *
7898f8def9eSPoul-Henning Kamp mdfind(int unit)
7908f8def9eSPoul-Henning Kamp {
7918f8def9eSPoul-Henning Kamp 	struct md_s *sc;
7928f8def9eSPoul-Henning Kamp 
7933f54a085SPoul-Henning Kamp 	LIST_FOREACH(sc, &md_softc_list, list) {
7943f54a085SPoul-Henning Kamp 		if (sc->unit == unit)
7958f8def9eSPoul-Henning Kamp 			break;
7968f8def9eSPoul-Henning Kamp 	}
7978f8def9eSPoul-Henning Kamp 	return (sc);
7988f8def9eSPoul-Henning Kamp }
7998f8def9eSPoul-Henning Kamp 
8008f8def9eSPoul-Henning Kamp static struct md_s *
801947fc8deSPoul-Henning Kamp mdnew(int unit, int *errp, enum md_types type)
8028f8def9eSPoul-Henning Kamp {
803f4e7c5a8SJaakko Heinonen 	struct md_s *sc;
804f4e7c5a8SJaakko Heinonen 	int error;
8058f8def9eSPoul-Henning Kamp 
8069b00ca19SPoul-Henning Kamp 	*errp = 0;
807f4e7c5a8SJaakko Heinonen 	if (unit == -1)
808f4e7c5a8SJaakko Heinonen 		unit = alloc_unr(md_uh);
809f4e7c5a8SJaakko Heinonen 	else
810f4e7c5a8SJaakko Heinonen 		unit = alloc_unr_specific(md_uh, unit);
811f4e7c5a8SJaakko Heinonen 
812f4e7c5a8SJaakko Heinonen 	if (unit == -1) {
8137ee3c044SPawel Jakub Dawidek 		*errp = EBUSY;
8143f54a085SPoul-Henning Kamp 		return (NULL);
8153f54a085SPoul-Henning Kamp 	}
816f4e7c5a8SJaakko Heinonen 
8179b00ca19SPoul-Henning Kamp 	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
818947fc8deSPoul-Henning Kamp 	sc->type = type;
8199b00ca19SPoul-Henning Kamp 	bioq_init(&sc->bio_queue);
8209b00ca19SPoul-Henning Kamp 	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
8213f54a085SPoul-Henning Kamp 	sc->unit = unit;
822f43b2bacSPoul-Henning Kamp 	sprintf(sc->name, "md%d", unit);
8237ee3c044SPawel Jakub Dawidek 	LIST_INSERT_HEAD(&md_softc_list, sc, list);
8243745c395SJulian Elischer 	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
8259b00ca19SPoul-Henning Kamp 	if (error == 0)
8269b00ca19SPoul-Henning Kamp 		return (sc);
8277ee3c044SPawel Jakub Dawidek 	LIST_REMOVE(sc, list);
8287ee3c044SPawel Jakub Dawidek 	mtx_destroy(&sc->queue_mtx);
829f4e7c5a8SJaakko Heinonen 	free_unr(md_uh, sc->unit);
8305c97ca54SIan Dowse 	free(sc, M_MD);
8317ee3c044SPawel Jakub Dawidek 	*errp = error;
8325c97ca54SIan Dowse 	return (NULL);
8335c97ca54SIan Dowse }
8348f8def9eSPoul-Henning Kamp 
8358f8def9eSPoul-Henning Kamp static void
8368f8def9eSPoul-Henning Kamp mdinit(struct md_s *sc)
8378f8def9eSPoul-Henning Kamp {
8386f4f00f1SPoul-Henning Kamp 	struct g_geom *gp;
8396f4f00f1SPoul-Henning Kamp 	struct g_provider *pp;
8406f4f00f1SPoul-Henning Kamp 
8416f4f00f1SPoul-Henning Kamp 	g_topology_lock();
8426f4f00f1SPoul-Henning Kamp 	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
8436f4f00f1SPoul-Henning Kamp 	gp->softc = sc;
8446f4f00f1SPoul-Henning Kamp 	pp = g_new_providerf(gp, "md%d", sc->unit);
845b830359bSPawel Jakub Dawidek 	pp->mediasize = sc->mediasize;
846b830359bSPawel Jakub Dawidek 	pp->sectorsize = sc->sectorsize;
8476f4f00f1SPoul-Henning Kamp 	sc->gp = gp;
8486f4f00f1SPoul-Henning Kamp 	sc->pp = pp;
8496f4f00f1SPoul-Henning Kamp 	g_error_provider(pp, 0);
8506f4f00f1SPoul-Henning Kamp 	g_topology_unlock();
851a03be42dSMaxim Sobolev 	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
852a03be42dSMaxim Sobolev 	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
8536f4f00f1SPoul-Henning Kamp }
85471e4fff8SPoul-Henning Kamp 
85596b6a55fSPoul-Henning Kamp /*
85696b6a55fSPoul-Henning Kamp  * XXX: we should check that the range they feed us is mapped.
85796b6a55fSPoul-Henning Kamp  * XXX: we should implement read-only.
85896b6a55fSPoul-Henning Kamp  */
85996b6a55fSPoul-Henning Kamp 
860637f671aSPoul-Henning Kamp static int
861b830359bSPawel Jakub Dawidek mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
86271e4fff8SPoul-Henning Kamp {
86371e4fff8SPoul-Henning Kamp 
864b830359bSPawel Jakub Dawidek 	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
865637f671aSPoul-Henning Kamp 		return (EINVAL);
8665ed1eb2bSEdward Tomasz Napierala 	if (mdio->md_base == 0)
8675ed1eb2bSEdward Tomasz Napierala 		return (EINVAL);
86826a0ee75SDima Dorfman 	sc->flags = mdio->md_options & MD_FORCE;
86996b6a55fSPoul-Henning Kamp 	/* Cast to pointer size, then to pointer to avoid warning */
870dc57d7c6SPeter Wemm 	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
871b830359bSPawel Jakub Dawidek 	sc->pl_len = (size_t)sc->mediasize;
872637f671aSPoul-Henning Kamp 	return (0);
87395f1a897SPoul-Henning Kamp }
87495f1a897SPoul-Henning Kamp 
875637f671aSPoul-Henning Kamp 
8768f8def9eSPoul-Henning Kamp static int
877b830359bSPawel Jakub Dawidek mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
87895f1a897SPoul-Henning Kamp {
879c6517568SPoul-Henning Kamp 	uintptr_t sp;
880c6517568SPoul-Henning Kamp 	int error;
881b830359bSPawel Jakub Dawidek 	off_t u;
88295f1a897SPoul-Henning Kamp 
883c6517568SPoul-Henning Kamp 	error = 0;
8848f8def9eSPoul-Henning Kamp 	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
8858f8def9eSPoul-Henning Kamp 		return (EINVAL);
886b830359bSPawel Jakub Dawidek 	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
887ebe789d6SPoul-Henning Kamp 		return (EINVAL);
8888f8def9eSPoul-Henning Kamp 	/* Compression doesn't make sense if we have reserved space */
8898f8def9eSPoul-Henning Kamp 	if (mdio->md_options & MD_RESERVE)
8908f8def9eSPoul-Henning Kamp 		mdio->md_options &= ~MD_COMPRESS;
8914e8bfe14SPoul-Henning Kamp 	if (mdio->md_fwsectors != 0)
8924e8bfe14SPoul-Henning Kamp 		sc->fwsectors = mdio->md_fwsectors;
8934e8bfe14SPoul-Henning Kamp 	if (mdio->md_fwheads != 0)
8944e8bfe14SPoul-Henning Kamp 		sc->fwheads = mdio->md_fwheads;
89526a0ee75SDima Dorfman 	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
896b830359bSPawel Jakub Dawidek 	sc->indir = dimension(sc->mediasize / sc->sectorsize);
897b830359bSPawel Jakub Dawidek 	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
898b830359bSPawel Jakub Dawidek 	    0x1ff, 0);
89996b6a55fSPoul-Henning Kamp 	if (mdio->md_options & MD_RESERVE) {
900b830359bSPawel Jakub Dawidek 		off_t nsectors;
901b830359bSPawel Jakub Dawidek 
902b830359bSPawel Jakub Dawidek 		nsectors = sc->mediasize / sc->sectorsize;
903b830359bSPawel Jakub Dawidek 		for (u = 0; u < nsectors; u++) {
904007777f1SKonstantin Belousov 			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
905007777f1SKonstantin Belousov 			    M_WAITOK : M_NOWAIT) | M_ZERO);
906c6517568SPoul-Henning Kamp 			if (sp != 0)
907fde2a2e4SPoul-Henning Kamp 				error = s_write(sc->indir, u, sp);
908c6517568SPoul-Henning Kamp 			else
909c6517568SPoul-Henning Kamp 				error = ENOMEM;
910b830359bSPawel Jakub Dawidek 			if (error != 0)
911c6517568SPoul-Henning Kamp 				break;
9128f8def9eSPoul-Henning Kamp 		}
913c6517568SPoul-Henning Kamp 	}
914c6517568SPoul-Henning Kamp 	return (error);
91500a6a3c6SPoul-Henning Kamp }
91600a6a3c6SPoul-Henning Kamp 
9173f54a085SPoul-Henning Kamp 
9188f8def9eSPoul-Henning Kamp static int
9198f8def9eSPoul-Henning Kamp mdsetcred(struct md_s *sc, struct ucred *cred)
9208f8def9eSPoul-Henning Kamp {
9218f8def9eSPoul-Henning Kamp 	char *tmpbuf;
9228f8def9eSPoul-Henning Kamp 	int error = 0;
9238f8def9eSPoul-Henning Kamp 
9243f54a085SPoul-Henning Kamp 	/*
9258f8def9eSPoul-Henning Kamp 	 * Set credits in our softc
9263f54a085SPoul-Henning Kamp 	 */
9278f8def9eSPoul-Henning Kamp 
9288f8def9eSPoul-Henning Kamp 	if (sc->cred)
9298f8def9eSPoul-Henning Kamp 		crfree(sc->cred);
930bd78ceceSJohn Baldwin 	sc->cred = crhold(cred);
9318f8def9eSPoul-Henning Kamp 
9328f8def9eSPoul-Henning Kamp 	/*
9338f8def9eSPoul-Henning Kamp 	 * Horrible kludge to establish credentials for NFS  XXX.
9348f8def9eSPoul-Henning Kamp 	 */
9358f8def9eSPoul-Henning Kamp 
9368f8def9eSPoul-Henning Kamp 	if (sc->vnode) {
9378f8def9eSPoul-Henning Kamp 		struct uio auio;
9388f8def9eSPoul-Henning Kamp 		struct iovec aiov;
9398f8def9eSPoul-Henning Kamp 
940b830359bSPawel Jakub Dawidek 		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
9418f8def9eSPoul-Henning Kamp 		bzero(&auio, sizeof(auio));
9428f8def9eSPoul-Henning Kamp 
9438f8def9eSPoul-Henning Kamp 		aiov.iov_base = tmpbuf;
944b830359bSPawel Jakub Dawidek 		aiov.iov_len = sc->sectorsize;
9458f8def9eSPoul-Henning Kamp 		auio.uio_iov = &aiov;
9468f8def9eSPoul-Henning Kamp 		auio.uio_iovcnt = 1;
9478f8def9eSPoul-Henning Kamp 		auio.uio_offset = 0;
9488f8def9eSPoul-Henning Kamp 		auio.uio_rw = UIO_READ;
9498f8def9eSPoul-Henning Kamp 		auio.uio_segflg = UIO_SYSSPACE;
9508f8def9eSPoul-Henning Kamp 		auio.uio_resid = aiov.iov_len;
951cb05b60aSAttilio Rao 		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
9528f8def9eSPoul-Henning Kamp 		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
95322db15c0SAttilio Rao 		VOP_UNLOCK(sc->vnode, 0);
9548f8def9eSPoul-Henning Kamp 		free(tmpbuf, M_TEMP);
9558f8def9eSPoul-Henning Kamp 	}
9568f8def9eSPoul-Henning Kamp 	return (error);
9578f8def9eSPoul-Henning Kamp }
9588f8def9eSPoul-Henning Kamp 
9598f8def9eSPoul-Henning Kamp static int
960b830359bSPawel Jakub Dawidek mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
9618f8def9eSPoul-Henning Kamp {
9628f8def9eSPoul-Henning Kamp 	struct vattr vattr;
9638f8def9eSPoul-Henning Kamp 	struct nameidata nd;
9643d5c947dSMarcel Moolenaar 	char *fname;
965a08d2e7fSJohn Baldwin 	int error, flags, vfslocked;
9668f8def9eSPoul-Henning Kamp 
9673d5c947dSMarcel Moolenaar 	/*
9683d5c947dSMarcel Moolenaar 	 * Kernel-originated requests must have the filename appended
9693d5c947dSMarcel Moolenaar 	 * to the mdio structure to protect against malicious software.
9703d5c947dSMarcel Moolenaar 	 */
9713d5c947dSMarcel Moolenaar 	fname = mdio->md_file;
9723d5c947dSMarcel Moolenaar 	if ((void *)fname != (void *)(mdio + 1)) {
9733d5c947dSMarcel Moolenaar 		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
97488b5b78dSPawel Jakub Dawidek 		if (error != 0)
97588b5b78dSPawel Jakub Dawidek 			return (error);
9763d5c947dSMarcel Moolenaar 	} else
9773d5c947dSMarcel Moolenaar 		strlcpy(sc->file, fname, sizeof(sc->file));
9783d5c947dSMarcel Moolenaar 
97986776891SChristian S.J. Peron 	/*
9803d5c947dSMarcel Moolenaar 	 * If the user specified that this is a read only device, don't
9813d5c947dSMarcel Moolenaar 	 * set the FWRITE mask before trying to open the backing store.
98286776891SChristian S.J. Peron 	 */
9833d5c947dSMarcel Moolenaar 	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
984a08d2e7fSJohn Baldwin 	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
9859e223287SKonstantin Belousov 	error = vn_open(&nd, &flags, 0, NULL);
986e3ed29a7SPawel Jakub Dawidek 	if (error != 0)
98752c6716fSPawel Jakub Dawidek 		return (error);
988a08d2e7fSJohn Baldwin 	vfslocked = NDHASGIANT(&nd);
989b322d85dSPawel Jakub Dawidek 	NDFREE(&nd, NDF_ONLY_PNBUF);
99033fc3625SJohn Baldwin 	if (nd.ni_vp->v_type != VREG) {
99133fc3625SJohn Baldwin 		error = EINVAL;
99233fc3625SJohn Baldwin 		goto bad;
99333fc3625SJohn Baldwin 	}
99433fc3625SJohn Baldwin 	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
99533fc3625SJohn Baldwin 	if (error != 0)
99633fc3625SJohn Baldwin 		goto bad;
99733fc3625SJohn Baldwin 	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
99833fc3625SJohn Baldwin 		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
99933fc3625SJohn Baldwin 		if (nd.ni_vp->v_iflag & VI_DOOMED) {
100033fc3625SJohn Baldwin 			/* Forced unmount. */
100133fc3625SJohn Baldwin 			error = EBADF;
100233fc3625SJohn Baldwin 			goto bad;
100333fc3625SJohn Baldwin 		}
10048f8def9eSPoul-Henning Kamp 	}
10053b7b5496SKonstantin Belousov 	nd.ni_vp->v_vflag |= VV_MD;
100622db15c0SAttilio Rao 	VOP_UNLOCK(nd.ni_vp, 0);
10079589c256SPoul-Henning Kamp 
1008d5a929dcSPoul-Henning Kamp 	if (mdio->md_fwsectors != 0)
1009d5a929dcSPoul-Henning Kamp 		sc->fwsectors = mdio->md_fwsectors;
1010d5a929dcSPoul-Henning Kamp 	if (mdio->md_fwheads != 0)
1011d5a929dcSPoul-Henning Kamp 		sc->fwheads = mdio->md_fwheads;
10127a6b2b64SPoul-Henning Kamp 	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
10139589c256SPoul-Henning Kamp 	if (!(flags & FWRITE))
10149589c256SPoul-Henning Kamp 		sc->flags |= MD_READONLY;
10158f8def9eSPoul-Henning Kamp 	sc->vnode = nd.ni_vp;
10168f8def9eSPoul-Henning Kamp 
1017a854ed98SJohn Baldwin 	error = mdsetcred(sc, td->td_ucred);
1018b830359bSPawel Jakub Dawidek 	if (error != 0) {
10193cf74e53SPhilip Paeps 		sc->vnode = NULL;
1020cb05b60aSAttilio Rao 		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
10213b7b5496SKonstantin Belousov 		nd.ni_vp->v_vflag &= ~VV_MD;
102233fc3625SJohn Baldwin 		goto bad;
102333fc3625SJohn Baldwin 	}
102433fc3625SJohn Baldwin 	VFS_UNLOCK_GIANT(vfslocked);
102533fc3625SJohn Baldwin 	return (0);
102633fc3625SJohn Baldwin bad:
102722db15c0SAttilio Rao 	VOP_UNLOCK(nd.ni_vp, 0);
102852c6716fSPawel Jakub Dawidek 	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
1029a08d2e7fSJohn Baldwin 	VFS_UNLOCK_GIANT(vfslocked);
10308f8def9eSPoul-Henning Kamp 	return (error);
10318f8def9eSPoul-Henning Kamp }
10328f8def9eSPoul-Henning Kamp 
10338f8def9eSPoul-Henning Kamp static int
1034b40ce416SJulian Elischer mddestroy(struct md_s *sc, struct thread *td)
10358f8def9eSPoul-Henning Kamp {
1036a08d2e7fSJohn Baldwin 	int vfslocked;
10370cddd8f0SMatthew Dillon 
10386f4f00f1SPoul-Henning Kamp 	if (sc->gp) {
10396f4f00f1SPoul-Henning Kamp 		sc->gp->softc = NULL;
10409b00ca19SPoul-Henning Kamp 		g_topology_lock();
10419b00ca19SPoul-Henning Kamp 		g_wither_geom(sc->gp, ENXIO);
10429b00ca19SPoul-Henning Kamp 		g_topology_unlock();
10436b60a2cdSPoul-Henning Kamp 		sc->gp = NULL;
10446b60a2cdSPoul-Henning Kamp 		sc->pp = NULL;
10451f4ee1aaSPoul-Henning Kamp 	}
1046a03be42dSMaxim Sobolev 	if (sc->devstat) {
1047a03be42dSMaxim Sobolev 		devstat_remove_entry(sc->devstat);
1048a03be42dSMaxim Sobolev 		sc->devstat = NULL;
1049a03be42dSMaxim Sobolev 	}
1050a08d2e7fSJohn Baldwin 	mtx_lock(&sc->queue_mtx);
10515c97ca54SIan Dowse 	sc->flags |= MD_SHUTDOWN;
10525c97ca54SIan Dowse 	wakeup(sc);
1053a08d2e7fSJohn Baldwin 	while (!(sc->flags & MD_EXITING))
1054a08d2e7fSJohn Baldwin 		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
1055a08d2e7fSJohn Baldwin 	mtx_unlock(&sc->queue_mtx);
10569fbea3e3SPoul-Henning Kamp 	mtx_destroy(&sc->queue_mtx);
10579b00ca19SPoul-Henning Kamp 	if (sc->vnode != NULL) {
1058a08d2e7fSJohn Baldwin 		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
1059cb05b60aSAttilio Rao 		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
10603b7b5496SKonstantin Belousov 		sc->vnode->v_vflag &= ~VV_MD;
106122db15c0SAttilio Rao 		VOP_UNLOCK(sc->vnode, 0);
10629d4b5945SMaxim Sobolev 		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
1063b40ce416SJulian Elischer 		    FREAD : (FREAD|FWRITE), sc->cred, td);
1064a08d2e7fSJohn Baldwin 		VFS_UNLOCK_GIANT(vfslocked);
10659b00ca19SPoul-Henning Kamp 	}
10668f8def9eSPoul-Henning Kamp 	if (sc->cred != NULL)
10678f8def9eSPoul-Henning Kamp 		crfree(sc->cred);
10681db17c6dSPawel Jakub Dawidek 	if (sc->object != NULL)
1069f820bc50SAlan Cox 		vm_object_deallocate(sc->object);
1070f43b2bacSPoul-Henning Kamp 	if (sc->indir)
1071f43b2bacSPoul-Henning Kamp 		destroy_indir(sc, sc->indir);
1072f43b2bacSPoul-Henning Kamp 	if (sc->uma)
1073f43b2bacSPoul-Henning Kamp 		uma_zdestroy(sc->uma);
10741f4ee1aaSPoul-Henning Kamp 
10751f4ee1aaSPoul-Henning Kamp 	LIST_REMOVE(sc, list);
1076f4e7c5a8SJaakko Heinonen 	free_unr(md_uh, sc->unit);
1077c6517568SPoul-Henning Kamp 	free(sc, M_MD);
10788f8def9eSPoul-Henning Kamp 	return (0);
10798f8def9eSPoul-Henning Kamp }
10808f8def9eSPoul-Henning Kamp 
10818f8def9eSPoul-Henning Kamp static int
1082b830359bSPawel Jakub Dawidek mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
10838f8def9eSPoul-Henning Kamp {
1084fcd57fbeSPawel Jakub Dawidek 	vm_ooffset_t npage;
1085fcd57fbeSPawel Jakub Dawidek 	int error;
10868f8def9eSPoul-Henning Kamp 
10878f8def9eSPoul-Henning Kamp 	/*
10888f8def9eSPoul-Henning Kamp 	 * Range check.  Disallow negative sizes or any size less then the
10898f8def9eSPoul-Henning Kamp 	 * size of a page.  Then round to a page.
10908f8def9eSPoul-Henning Kamp 	 */
1091b830359bSPawel Jakub Dawidek 	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
10928f8def9eSPoul-Henning Kamp 		return (EDOM);
10938f8def9eSPoul-Henning Kamp 
10948f8def9eSPoul-Henning Kamp 	/*
10958f8def9eSPoul-Henning Kamp 	 * Allocate an OBJT_SWAP object.
10968f8def9eSPoul-Henning Kamp 	 *
10978f8def9eSPoul-Henning Kamp 	 * Note the truncation.
10988f8def9eSPoul-Henning Kamp 	 */
10998f8def9eSPoul-Henning Kamp 
1100b830359bSPawel Jakub Dawidek 	npage = mdio->md_mediasize / PAGE_SIZE;
11019ed40643SPoul-Henning Kamp 	if (mdio->md_fwsectors != 0)
11029ed40643SPoul-Henning Kamp 		sc->fwsectors = mdio->md_fwsectors;
11039ed40643SPoul-Henning Kamp 	if (mdio->md_fwheads != 0)
11049ed40643SPoul-Henning Kamp 		sc->fwheads = mdio->md_fwheads;
1105fcd57fbeSPawel Jakub Dawidek 	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
11063364c323SKonstantin Belousov 	    VM_PROT_DEFAULT, 0, td->td_ucred);
1107812851b6SBrian Feldman 	if (sc->object == NULL)
1108812851b6SBrian Feldman 		return (ENOMEM);
110926a0ee75SDima Dorfman 	sc->flags = mdio->md_options & MD_FORCE;
11108f8def9eSPoul-Henning Kamp 	if (mdio->md_options & MD_RESERVE) {
1111fcd57fbeSPawel Jakub Dawidek 		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
11123364c323SKonstantin Belousov 			error = EDOM;
11133364c323SKonstantin Belousov 			goto finish;
11148f8def9eSPoul-Henning Kamp 		}
11158f8def9eSPoul-Henning Kamp 	}
1116a854ed98SJohn Baldwin 	error = mdsetcred(sc, td->td_ucred);
11173364c323SKonstantin Belousov  finish:
1118e3ed29a7SPawel Jakub Dawidek 	if (error != 0) {
11192eafd8b1SPawel Jakub Dawidek 		vm_object_deallocate(sc->object);
11202eafd8b1SPawel Jakub Dawidek 		sc->object = NULL;
11218f8def9eSPoul-Henning Kamp 	}
1122b830359bSPawel Jakub Dawidek 	return (error);
1123b3b3d1b7SPoul-Henning Kamp }
11248f8def9eSPoul-Henning Kamp 
11259d4b5945SMaxim Sobolev 
11269d4b5945SMaxim Sobolev static int
11279b00ca19SPoul-Henning Kamp xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
11288f8def9eSPoul-Henning Kamp {
11298f8def9eSPoul-Henning Kamp 	struct md_ioctl *mdio;
11308f8def9eSPoul-Henning Kamp 	struct md_s *sc;
1131b830359bSPawel Jakub Dawidek 	int error, i;
11328f8def9eSPoul-Henning Kamp 
11338f8def9eSPoul-Henning Kamp 	if (md_debug)
11348f8def9eSPoul-Henning Kamp 		printf("mdctlioctl(%s %lx %p %x %p)\n",
1135b40ce416SJulian Elischer 			devtoname(dev), cmd, addr, flags, td);
11368f8def9eSPoul-Henning Kamp 
11379b00ca19SPoul-Henning Kamp 	mdio = (struct md_ioctl *)addr;
11389b00ca19SPoul-Henning Kamp 	if (mdio->md_version != MDIOVERSION)
11399b00ca19SPoul-Henning Kamp 		return (EINVAL);
11409b00ca19SPoul-Henning Kamp 
114153d745bcSDima Dorfman 	/*
114253d745bcSDima Dorfman 	 * We assert the version number in the individual ioctl
114353d745bcSDima Dorfman 	 * handlers instead of out here because (a) it is possible we
114453d745bcSDima Dorfman 	 * may add another ioctl in the future which doesn't read an
114553d745bcSDima Dorfman 	 * mdio, and (b) the correct return value for an unknown ioctl
114653d745bcSDima Dorfman 	 * is ENOIOCTL, not EINVAL.
114753d745bcSDima Dorfman 	 */
11489b00ca19SPoul-Henning Kamp 	error = 0;
11498f8def9eSPoul-Henning Kamp 	switch (cmd) {
11508f8def9eSPoul-Henning Kamp 	case MDIOCATTACH:
11518f8def9eSPoul-Henning Kamp 		switch (mdio->md_type) {
11528f8def9eSPoul-Henning Kamp 		case MD_MALLOC:
11538f8def9eSPoul-Henning Kamp 		case MD_PRELOAD:
11548f8def9eSPoul-Henning Kamp 		case MD_VNODE:
11558f8def9eSPoul-Henning Kamp 		case MD_SWAP:
1156b830359bSPawel Jakub Dawidek 			break;
11578f8def9eSPoul-Henning Kamp 		default:
11588f8def9eSPoul-Henning Kamp 			return (EINVAL);
11598f8def9eSPoul-Henning Kamp 		}
11607ee3c044SPawel Jakub Dawidek 		if (mdio->md_options & MD_AUTOUNIT)
1161947fc8deSPoul-Henning Kamp 			sc = mdnew(-1, &error, mdio->md_type);
1162f4e7c5a8SJaakko Heinonen 		else {
1163f4e7c5a8SJaakko Heinonen 			if (mdio->md_unit > INT_MAX)
1164f4e7c5a8SJaakko Heinonen 				return (EINVAL);
1165947fc8deSPoul-Henning Kamp 			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
1166f4e7c5a8SJaakko Heinonen 		}
1167b830359bSPawel Jakub Dawidek 		if (sc == NULL)
11687ee3c044SPawel Jakub Dawidek 			return (error);
11697ee3c044SPawel Jakub Dawidek 		if (mdio->md_options & MD_AUTOUNIT)
11707ee3c044SPawel Jakub Dawidek 			mdio->md_unit = sc->unit;
1171b830359bSPawel Jakub Dawidek 		sc->mediasize = mdio->md_mediasize;
1172b830359bSPawel Jakub Dawidek 		if (mdio->md_sectorsize == 0)
1173b830359bSPawel Jakub Dawidek 			sc->sectorsize = DEV_BSIZE;
1174b830359bSPawel Jakub Dawidek 		else
1175b830359bSPawel Jakub Dawidek 			sc->sectorsize = mdio->md_sectorsize;
1176b830359bSPawel Jakub Dawidek 		error = EDOOFUS;
1177b830359bSPawel Jakub Dawidek 		switch (sc->type) {
1178b830359bSPawel Jakub Dawidek 		case MD_MALLOC:
11799b00ca19SPoul-Henning Kamp 			sc->start = mdstart_malloc;
1180b830359bSPawel Jakub Dawidek 			error = mdcreate_malloc(sc, mdio);
1181b830359bSPawel Jakub Dawidek 			break;
1182b830359bSPawel Jakub Dawidek 		case MD_PRELOAD:
11839b00ca19SPoul-Henning Kamp 			sc->start = mdstart_preload;
1184b830359bSPawel Jakub Dawidek 			error = mdcreate_preload(sc, mdio);
1185b830359bSPawel Jakub Dawidek 			break;
1186b830359bSPawel Jakub Dawidek 		case MD_VNODE:
11879b00ca19SPoul-Henning Kamp 			sc->start = mdstart_vnode;
1188b830359bSPawel Jakub Dawidek 			error = mdcreate_vnode(sc, mdio, td);
1189b830359bSPawel Jakub Dawidek 			break;
1190b830359bSPawel Jakub Dawidek 		case MD_SWAP:
11919b00ca19SPoul-Henning Kamp 			sc->start = mdstart_swap;
1192b830359bSPawel Jakub Dawidek 			error = mdcreate_swap(sc, mdio, td);
1193b830359bSPawel Jakub Dawidek 			break;
1194b830359bSPawel Jakub Dawidek 		}
1195b830359bSPawel Jakub Dawidek 		if (error != 0) {
1196b830359bSPawel Jakub Dawidek 			mddestroy(sc, td);
1197b830359bSPawel Jakub Dawidek 			return (error);
1198b830359bSPawel Jakub Dawidek 		}
11999b00ca19SPoul-Henning Kamp 
12009b00ca19SPoul-Henning Kamp 		/* Prune off any residual fractional sector */
12019b00ca19SPoul-Henning Kamp 		i = sc->mediasize % sc->sectorsize;
12029b00ca19SPoul-Henning Kamp 		sc->mediasize -= i;
12039b00ca19SPoul-Henning Kamp 
1204b830359bSPawel Jakub Dawidek 		mdinit(sc);
1205b830359bSPawel Jakub Dawidek 		return (0);
12068f8def9eSPoul-Henning Kamp 	case MDIOCDETACH:
1207a9ebb311SEdward Tomasz Napierala 		if (mdio->md_mediasize != 0 ||
1208a9ebb311SEdward Tomasz Napierala 		    (mdio->md_options & ~MD_FORCE) != 0)
12098f8def9eSPoul-Henning Kamp 			return (EINVAL);
12109b00ca19SPoul-Henning Kamp 
12119b00ca19SPoul-Henning Kamp 		sc = mdfind(mdio->md_unit);
12129b00ca19SPoul-Henning Kamp 		if (sc == NULL)
12139b00ca19SPoul-Henning Kamp 			return (ENOENT);
1214a9ebb311SEdward Tomasz Napierala 		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
1215a9ebb311SEdward Tomasz Napierala 		    !(mdio->md_options & MD_FORCE))
12169b00ca19SPoul-Henning Kamp 			return (EBUSY);
12179b00ca19SPoul-Henning Kamp 		return (mddestroy(sc, td));
1218174b5e9aSPoul-Henning Kamp 	case MDIOCQUERY:
1219174b5e9aSPoul-Henning Kamp 		sc = mdfind(mdio->md_unit);
1220174b5e9aSPoul-Henning Kamp 		if (sc == NULL)
1221174b5e9aSPoul-Henning Kamp 			return (ENOENT);
1222174b5e9aSPoul-Henning Kamp 		mdio->md_type = sc->type;
1223174b5e9aSPoul-Henning Kamp 		mdio->md_options = sc->flags;
1224b830359bSPawel Jakub Dawidek 		mdio->md_mediasize = sc->mediasize;
1225b830359bSPawel Jakub Dawidek 		mdio->md_sectorsize = sc->sectorsize;
12269b00ca19SPoul-Henning Kamp 		if (sc->type == MD_VNODE)
122788b5b78dSPawel Jakub Dawidek 			error = copyout(sc->file, mdio->md_file,
122888b5b78dSPawel Jakub Dawidek 			    strlen(sc->file) + 1);
122988b5b78dSPawel Jakub Dawidek 		return (error);
123016bcbe8cSPoul-Henning Kamp 	case MDIOCLIST:
123116bcbe8cSPoul-Henning Kamp 		i = 1;
123216bcbe8cSPoul-Henning Kamp 		LIST_FOREACH(sc, &md_softc_list, list) {
123316bcbe8cSPoul-Henning Kamp 			if (i == MDNPAD - 1)
123416bcbe8cSPoul-Henning Kamp 				mdio->md_pad[i] = -1;
123516bcbe8cSPoul-Henning Kamp 			else
123616bcbe8cSPoul-Henning Kamp 				mdio->md_pad[i++] = sc->unit;
123716bcbe8cSPoul-Henning Kamp 		}
123816bcbe8cSPoul-Henning Kamp 		mdio->md_pad[0] = i - 1;
123916bcbe8cSPoul-Henning Kamp 		return (0);
12408f8def9eSPoul-Henning Kamp 	default:
12418f8def9eSPoul-Henning Kamp 		return (ENOIOCTL);
12428f8def9eSPoul-Henning Kamp 	};
12439b00ca19SPoul-Henning Kamp }
12449b00ca19SPoul-Henning Kamp 
12459b00ca19SPoul-Henning Kamp static int
12469b00ca19SPoul-Henning Kamp mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
12479b00ca19SPoul-Henning Kamp {
12489b00ca19SPoul-Henning Kamp 	int error;
12499b00ca19SPoul-Henning Kamp 
12509b00ca19SPoul-Henning Kamp 	sx_xlock(&md_sx);
12519b00ca19SPoul-Henning Kamp 	error = xmdctlioctl(dev, cmd, addr, flags, td);
12529b00ca19SPoul-Henning Kamp 	sx_xunlock(&md_sx);
12539b00ca19SPoul-Henning Kamp 	return (error);
12543f54a085SPoul-Henning Kamp }
12553f54a085SPoul-Henning Kamp 
125600a6a3c6SPoul-Henning Kamp static void
1257b830359bSPawel Jakub Dawidek md_preloaded(u_char *image, size_t length)
1258637f671aSPoul-Henning Kamp {
1259637f671aSPoul-Henning Kamp 	struct md_s *sc;
12609b00ca19SPoul-Henning Kamp 	int error;
1261637f671aSPoul-Henning Kamp 
1262947fc8deSPoul-Henning Kamp 	sc = mdnew(-1, &error, MD_PRELOAD);
1263637f671aSPoul-Henning Kamp 	if (sc == NULL)
1264637f671aSPoul-Henning Kamp 		return;
1265b830359bSPawel Jakub Dawidek 	sc->mediasize = length;
1266b830359bSPawel Jakub Dawidek 	sc->sectorsize = DEV_BSIZE;
1267637f671aSPoul-Henning Kamp 	sc->pl_ptr = image;
1268637f671aSPoul-Henning Kamp 	sc->pl_len = length;
12699b00ca19SPoul-Henning Kamp 	sc->start = mdstart_preload;
12705d4ca75eSLuigi Rizzo #ifdef MD_ROOT
1271637f671aSPoul-Henning Kamp 	if (sc->unit == 0)
12725d4ca75eSLuigi Rizzo 		rootdevnames[0] = "ufs:/dev/md0";
12735d4ca75eSLuigi Rizzo #endif
1274637f671aSPoul-Henning Kamp 	mdinit(sc);
1275637f671aSPoul-Henning Kamp }
1276637f671aSPoul-Henning Kamp 
1277637f671aSPoul-Henning Kamp static void
127819945697SPoul-Henning Kamp g_md_init(struct g_class *mp __unused)
127900a6a3c6SPoul-Henning Kamp {
128095f1a897SPoul-Henning Kamp 	caddr_t mod;
128195f1a897SPoul-Henning Kamp 	u_char *ptr, *name, *type;
128295f1a897SPoul-Henning Kamp 	unsigned len;
1283d12fc952SKonstantin Belousov 	int i;
1284d12fc952SKonstantin Belousov 
1285d12fc952SKonstantin Belousov 	/* figure out log2(NINDIR) */
1286d12fc952SKonstantin Belousov 	for (i = NINDIR, nshift = -1; i; nshift++)
1287d12fc952SKonstantin Belousov 		i >>= 1;
128895f1a897SPoul-Henning Kamp 
12890a937206SPoul-Henning Kamp 	mod = NULL;
12909b00ca19SPoul-Henning Kamp 	sx_init(&md_sx, "MD config lock");
12910a937206SPoul-Henning Kamp 	g_topology_unlock();
1292f4e7c5a8SJaakko Heinonen 	md_uh = new_unrhdr(0, INT_MAX, NULL);
129371e4fff8SPoul-Henning Kamp #ifdef MD_ROOT_SIZE
12949b00ca19SPoul-Henning Kamp 	sx_xlock(&md_sx);
1295de64f22aSLuigi Rizzo 	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
12969b00ca19SPoul-Henning Kamp 	sx_xunlock(&md_sx);
129771e4fff8SPoul-Henning Kamp #endif
12989b00ca19SPoul-Henning Kamp 	/* XXX: are preload_* static or do they need Giant ? */
129995f1a897SPoul-Henning Kamp 	while ((mod = preload_search_next_name(mod)) != NULL) {
130095f1a897SPoul-Henning Kamp 		name = (char *)preload_search_info(mod, MODINFO_NAME);
130195f1a897SPoul-Henning Kamp 		if (name == NULL)
130295f1a897SPoul-Henning Kamp 			continue;
13039b00ca19SPoul-Henning Kamp 		type = (char *)preload_search_info(mod, MODINFO_TYPE);
130495f1a897SPoul-Henning Kamp 		if (type == NULL)
130595f1a897SPoul-Henning Kamp 			continue;
130671e4fff8SPoul-Henning Kamp 		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
130795f1a897SPoul-Henning Kamp 			continue;
13088d5ac6c3SMarcel Moolenaar 		ptr = preload_fetch_addr(mod);
13098d5ac6c3SMarcel Moolenaar 		len = preload_fetch_size(mod);
13108d5ac6c3SMarcel Moolenaar 		if (ptr != NULL && len != 0) {
1311fe603109SMaxim Sobolev 			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
1312fe603109SMaxim Sobolev 			    MD_NAME, mdunits, name, len, ptr);
13139b00ca19SPoul-Henning Kamp 			sx_xlock(&md_sx);
1314637f671aSPoul-Henning Kamp 			md_preloaded(ptr, len);
13159b00ca19SPoul-Henning Kamp 			sx_xunlock(&md_sx);
131695f1a897SPoul-Henning Kamp 		}
13178d5ac6c3SMarcel Moolenaar 	}
131806d425f9SEd Schouten 	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
131910b0e058SDima Dorfman 	    0600, MDCTL_NAME);
13200eb14309SPoul-Henning Kamp 	g_topology_lock();
132100a6a3c6SPoul-Henning Kamp }
132200a6a3c6SPoul-Henning Kamp 
132319945697SPoul-Henning Kamp static void
1324c27a8954SWojciech A. Koszek g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1325c27a8954SWojciech A. Koszek     struct g_consumer *cp __unused, struct g_provider *pp)
1326c27a8954SWojciech A. Koszek {
1327c27a8954SWojciech A. Koszek 	struct md_s *mp;
1328c27a8954SWojciech A. Koszek 	char *type;
1329c27a8954SWojciech A. Koszek 
1330c27a8954SWojciech A. Koszek 	mp = gp->softc;
1331c27a8954SWojciech A. Koszek 	if (mp == NULL)
1332c27a8954SWojciech A. Koszek 		return;
1333c27a8954SWojciech A. Koszek 
1334c27a8954SWojciech A. Koszek 	switch (mp->type) {
1335c27a8954SWojciech A. Koszek 	case MD_MALLOC:
1336c27a8954SWojciech A. Koszek 		type = "malloc";
1337c27a8954SWojciech A. Koszek 		break;
1338c27a8954SWojciech A. Koszek 	case MD_PRELOAD:
1339c27a8954SWojciech A. Koszek 		type = "preload";
1340c27a8954SWojciech A. Koszek 		break;
1341c27a8954SWojciech A. Koszek 	case MD_VNODE:
1342c27a8954SWojciech A. Koszek 		type = "vnode";
1343c27a8954SWojciech A. Koszek 		break;
1344c27a8954SWojciech A. Koszek 	case MD_SWAP:
1345c27a8954SWojciech A. Koszek 		type = "swap";
1346c27a8954SWojciech A. Koszek 		break;
1347c27a8954SWojciech A. Koszek 	default:
1348c27a8954SWojciech A. Koszek 		type = "unknown";
1349c27a8954SWojciech A. Koszek 		break;
1350c27a8954SWojciech A. Koszek 	}
1351c27a8954SWojciech A. Koszek 
1352c27a8954SWojciech A. Koszek 	if (pp != NULL) {
1353c27a8954SWojciech A. Koszek 		if (indent == NULL) {
1354c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " u %d", mp->unit);
1355c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
1356c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
1357c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
1358c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
1359c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " t %s", type);
1360c27a8954SWojciech A. Koszek 			if (mp->type == MD_VNODE && mp->vnode != NULL)
1361c27a8954SWojciech A. Koszek 				sbuf_printf(sb, " file %s", mp->file);
1362c27a8954SWojciech A. Koszek 		} else {
1363c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
1364c27a8954SWojciech A. Koszek 			    mp->unit);
1365c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
1366c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->sectorsize);
1367c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
1368c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->fwheads);
1369c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
1370c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->fwsectors);
1371c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<length>%ju</length>\n",
1372c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->mediasize);
1373*1f192809SAndrey V. Elsukov 			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
1374*1f192809SAndrey V. Elsukov 			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
1375*1f192809SAndrey V. Elsukov 			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
1376*1f192809SAndrey V. Elsukov 			    (mp->flags & MD_READONLY) == 0 ? "read-write":
1377*1f192809SAndrey V. Elsukov 			    "read-only");
1378c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1379c27a8954SWojciech A. Koszek 			    type);
1380c27a8954SWojciech A. Koszek 			if (mp->type == MD_VNODE && mp->vnode != NULL)
1381c27a8954SWojciech A. Koszek 				sbuf_printf(sb, "%s<file>%s</file>\n",
1382c27a8954SWojciech A. Koszek 				    indent, mp->file);
1383c27a8954SWojciech A. Koszek 		}
1384c27a8954SWojciech A. Koszek 	}
1385c27a8954SWojciech A. Koszek }
1386c27a8954SWojciech A. Koszek 
1387c27a8954SWojciech A. Koszek static void
138819945697SPoul-Henning Kamp g_md_fini(struct g_class *mp __unused)
138957e9624eSPoul-Henning Kamp {
13909d4b5945SMaxim Sobolev 
13919b00ca19SPoul-Henning Kamp 	sx_destroy(&md_sx);
139219945697SPoul-Henning Kamp 	if (status_dev != NULL)
139357e9624eSPoul-Henning Kamp 		destroy_dev(status_dev);
1394f4e7c5a8SJaakko Heinonen 	delete_unrhdr(md_uh);
139557e9624eSPoul-Henning Kamp }
1396