xref: /freebsd/sys/dev/md/md.c (revision 8cb51643e4f82929c50ba098020a832a0fbdac33)
1098ca2bdSWarner Losh /*-
200a6a3c6SPoul-Henning Kamp  * ----------------------------------------------------------------------------
300a6a3c6SPoul-Henning Kamp  * "THE BEER-WARE LICENSE" (Revision 42):
400a6a3c6SPoul-Henning Kamp  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
500a6a3c6SPoul-Henning Kamp  * can do whatever you want with this stuff. If we meet some day, and you think
600a6a3c6SPoul-Henning Kamp  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
700a6a3c6SPoul-Henning Kamp  * ----------------------------------------------------------------------------
800a6a3c6SPoul-Henning Kamp  *
900a6a3c6SPoul-Henning Kamp  * $FreeBSD$
1000a6a3c6SPoul-Henning Kamp  *
1100a6a3c6SPoul-Henning Kamp  */
1200a6a3c6SPoul-Henning Kamp 
13098ca2bdSWarner Losh /*-
14637f671aSPoul-Henning Kamp  * The following functions are based in the vn(4) driver: mdstart_swap(),
15637f671aSPoul-Henning Kamp  * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
16637f671aSPoul-Henning Kamp  * and as such under the following copyright:
17637f671aSPoul-Henning Kamp  *
18637f671aSPoul-Henning Kamp  * Copyright (c) 1988 University of Utah.
19637f671aSPoul-Henning Kamp  * Copyright (c) 1990, 1993
20637f671aSPoul-Henning Kamp  *	The Regents of the University of California.  All rights reserved.
21637f671aSPoul-Henning Kamp  *
22ed010cdfSWarner Losh  * This code is derived from software contributed to Berkeley by
23ed010cdfSWarner Losh  * the Systems Programming Group of the University of Utah Computer
24ed010cdfSWarner Losh  * Science Department.
25ed010cdfSWarner Losh  *
26637f671aSPoul-Henning Kamp  * Redistribution and use in source and binary forms, with or without
27637f671aSPoul-Henning Kamp  * modification, are permitted provided that the following conditions
28637f671aSPoul-Henning Kamp  * are met:
29637f671aSPoul-Henning Kamp  * 1. Redistributions of source code must retain the above copyright
30637f671aSPoul-Henning Kamp  *    notice, this list of conditions and the following disclaimer.
31637f671aSPoul-Henning Kamp  * 2. Redistributions in binary form must reproduce the above copyright
32637f671aSPoul-Henning Kamp  *    notice, this list of conditions and the following disclaimer in the
33637f671aSPoul-Henning Kamp  *    documentation and/or other materials provided with the distribution.
34637f671aSPoul-Henning Kamp  * 4. Neither the name of the University nor the names of its contributors
35637f671aSPoul-Henning Kamp  *    may be used to endorse or promote products derived from this software
36637f671aSPoul-Henning Kamp  *    without specific prior written permission.
37637f671aSPoul-Henning Kamp  *
38637f671aSPoul-Henning Kamp  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39637f671aSPoul-Henning Kamp  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40637f671aSPoul-Henning Kamp  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41637f671aSPoul-Henning Kamp  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42637f671aSPoul-Henning Kamp  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43637f671aSPoul-Henning Kamp  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44637f671aSPoul-Henning Kamp  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45637f671aSPoul-Henning Kamp  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46637f671aSPoul-Henning Kamp  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47637f671aSPoul-Henning Kamp  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48637f671aSPoul-Henning Kamp  * SUCH DAMAGE.
49637f671aSPoul-Henning Kamp  *
50637f671aSPoul-Henning Kamp  * from: Utah Hdr: vn.c 1.13 94/04/02
51637f671aSPoul-Henning Kamp  *
52637f671aSPoul-Henning Kamp  *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
53637f671aSPoul-Henning Kamp  * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
54637f671aSPoul-Henning Kamp  */
55637f671aSPoul-Henning Kamp 
566f4f00f1SPoul-Henning Kamp #include "opt_geom.h"
573f54a085SPoul-Henning Kamp #include "opt_md.h"
5871e4fff8SPoul-Henning Kamp 
5900a6a3c6SPoul-Henning Kamp #include <sys/param.h>
6000a6a3c6SPoul-Henning Kamp #include <sys/systm.h>
619626b608SPoul-Henning Kamp #include <sys/bio.h>
6200a6a3c6SPoul-Henning Kamp #include <sys/conf.h>
63a03be42dSMaxim Sobolev #include <sys/devicestat.h>
648f8def9eSPoul-Henning Kamp #include <sys/fcntl.h>
65fb919e4dSMark Murray #include <sys/kernel.h>
665c97ca54SIan Dowse #include <sys/kthread.h>
6706d425f9SEd Schouten #include <sys/limits.h>
68fb919e4dSMark Murray #include <sys/linker.h>
69fb919e4dSMark Murray #include <sys/lock.h>
70fb919e4dSMark Murray #include <sys/malloc.h>
71fb919e4dSMark Murray #include <sys/mdioctl.h>
72a08d2e7fSJohn Baldwin #include <sys/mount.h>
739dceb26bSJohn Baldwin #include <sys/mutex.h>
749b00ca19SPoul-Henning Kamp #include <sys/sx.h>
75fb919e4dSMark Murray #include <sys/namei.h>
768f8def9eSPoul-Henning Kamp #include <sys/proc.h>
77fb919e4dSMark Murray #include <sys/queue.h>
78657bd8b1SAndrey V. Elsukov #include <sys/sbuf.h>
7963710c4dSJohn Baldwin #include <sys/sched.h>
807cd53fddSAlan Cox #include <sys/sf_buf.h>
81fb919e4dSMark Murray #include <sys/sysctl.h>
82fb919e4dSMark Murray #include <sys/vnode.h>
83fb919e4dSMark Murray 
846f4f00f1SPoul-Henning Kamp #include <geom/geom.h>
856f4f00f1SPoul-Henning Kamp 
868f8def9eSPoul-Henning Kamp #include <vm/vm.h>
878f8def9eSPoul-Henning Kamp #include <vm/vm_object.h>
888f8def9eSPoul-Henning Kamp #include <vm/vm_page.h>
898f8def9eSPoul-Henning Kamp #include <vm/vm_pager.h>
908f8def9eSPoul-Henning Kamp #include <vm/swap_pager.h>
91f43b2bacSPoul-Henning Kamp #include <vm/uma.h>
923f54a085SPoul-Henning Kamp 
93cfb00e5aSMatthew D Fleming #include <machine/vmparam.h>
94cfb00e5aSMatthew D Fleming 
9557e9624eSPoul-Henning Kamp #define MD_MODVER 1
9657e9624eSPoul-Henning Kamp 
975c97ca54SIan Dowse #define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
98a08d2e7fSJohn Baldwin #define	MD_EXITING	0x20000		/* Worker thread is exiting. */
995c97ca54SIan Dowse 
100f2744793SSheldon Hearn #ifndef MD_NSECT
101f2744793SSheldon Hearn #define MD_NSECT (10000 * 2)
10233edfabeSPoul-Henning Kamp #endif
10333edfabeSPoul-Henning Kamp 
1045bb84bc8SRobert Watson static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
1055bb84bc8SRobert Watson static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");
10600a6a3c6SPoul-Henning Kamp 
10771e4fff8SPoul-Henning Kamp static int md_debug;
1083eb9ab52SEitan Adler SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
1093eb9ab52SEitan Adler     "Enable md(4) debug messages");
110c44d423eSKonstantin Belousov static int md_malloc_wait;
1113eb9ab52SEitan Adler SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
1123eb9ab52SEitan Adler     "Allow malloc to wait for memory allocations");
11300a6a3c6SPoul-Henning Kamp 
11471e4fff8SPoul-Henning Kamp #if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
115de64f22aSLuigi Rizzo /*
116de64f22aSLuigi Rizzo  * Preloaded image gets put here.
117de64f22aSLuigi Rizzo  * Applications that patch the object with the image can determine
118de64f22aSLuigi Rizzo  * the size looking at the start and end markers (strings),
119de64f22aSLuigi Rizzo  * so we want them contiguous.
120de64f22aSLuigi Rizzo  */
121de64f22aSLuigi Rizzo static struct {
122de64f22aSLuigi Rizzo 	u_char start[MD_ROOT_SIZE*1024];
123de64f22aSLuigi Rizzo 	u_char end[128];
124de64f22aSLuigi Rizzo } mfs_root = {
125de64f22aSLuigi Rizzo 	.start = "MFS Filesystem goes here",
126de64f22aSLuigi Rizzo 	.end = "MFS Filesystem had better STOP here",
127de64f22aSLuigi Rizzo };
12871e4fff8SPoul-Henning Kamp #endif
12971e4fff8SPoul-Henning Kamp 
13019945697SPoul-Henning Kamp static g_init_t g_md_init;
13119945697SPoul-Henning Kamp static g_fini_t g_md_fini;
13219945697SPoul-Henning Kamp static g_start_t g_md_start;
13319945697SPoul-Henning Kamp static g_access_t g_md_access;
134b42f40b8SJaakko Heinonen static void g_md_dumpconf(struct sbuf *sb, const char *indent,
135b42f40b8SJaakko Heinonen     struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
1360eb14309SPoul-Henning Kamp 
1378f8def9eSPoul-Henning Kamp static int mdunits;
13889c9c53dSPoul-Henning Kamp static struct cdev *status_dev = 0;
1399b00ca19SPoul-Henning Kamp static struct sx md_sx;
140f4e7c5a8SJaakko Heinonen static struct unrhdr *md_uh;
14157e9624eSPoul-Henning Kamp 
142a522a159SPoul-Henning Kamp static d_ioctl_t mdctlioctl;
1438f8def9eSPoul-Henning Kamp 
1448f8def9eSPoul-Henning Kamp static struct cdevsw mdctl_cdevsw = {
145dc08ffecSPoul-Henning Kamp 	.d_version =	D_VERSION,
1467ac40f5fSPoul-Henning Kamp 	.d_ioctl =	mdctlioctl,
1477ac40f5fSPoul-Henning Kamp 	.d_name =	MD_NAME,
14800a6a3c6SPoul-Henning Kamp };
14900a6a3c6SPoul-Henning Kamp 
15019945697SPoul-Henning Kamp struct g_class g_md_class = {
15119945697SPoul-Henning Kamp 	.name = "MD",
1525721c9c7SPoul-Henning Kamp 	.version = G_VERSION,
15319945697SPoul-Henning Kamp 	.init = g_md_init,
15419945697SPoul-Henning Kamp 	.fini = g_md_fini,
15519945697SPoul-Henning Kamp 	.start = g_md_start,
15619945697SPoul-Henning Kamp 	.access = g_md_access,
157c27a8954SWojciech A. Koszek 	.dumpconf = g_md_dumpconf,
15819945697SPoul-Henning Kamp };
15919945697SPoul-Henning Kamp 
16019945697SPoul-Henning Kamp DECLARE_GEOM_CLASS(g_md_class, g_md);
16119945697SPoul-Henning Kamp 
1620cfaeeeeSPoul-Henning Kamp 
16313e403fdSAntoine Brodin static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);
1643f54a085SPoul-Henning Kamp 
165c6517568SPoul-Henning Kamp #define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
166c6517568SPoul-Henning Kamp #define NMASK	(NINDIR-1)
167c6517568SPoul-Henning Kamp static int nshift;
168c6517568SPoul-Henning Kamp 
169c6517568SPoul-Henning Kamp struct indir {
170c6517568SPoul-Henning Kamp 	uintptr_t	*array;
1718b149b51SJohn Baldwin 	u_int		total;
1728b149b51SJohn Baldwin 	u_int		used;
1738b149b51SJohn Baldwin 	u_int		shift;
174c6517568SPoul-Henning Kamp };
175c6517568SPoul-Henning Kamp 
17600a6a3c6SPoul-Henning Kamp struct md_s {
17700a6a3c6SPoul-Henning Kamp 	int unit;
1783f54a085SPoul-Henning Kamp 	LIST_ENTRY(md_s) list;
1798177437dSPoul-Henning Kamp 	struct bio_queue_head bio_queue;
1800f8500a5SPoul-Henning Kamp 	struct mtx queue_mtx;
18189c9c53dSPoul-Henning Kamp 	struct cdev *dev;
1828f8def9eSPoul-Henning Kamp 	enum md_types type;
183b830359bSPawel Jakub Dawidek 	off_t mediasize;
184b830359bSPawel Jakub Dawidek 	unsigned sectorsize;
185fe603109SMaxim Sobolev 	unsigned opencount;
1864e8bfe14SPoul-Henning Kamp 	unsigned fwheads;
1874e8bfe14SPoul-Henning Kamp 	unsigned fwsectors;
1888f8def9eSPoul-Henning Kamp 	unsigned flags;
189f43b2bacSPoul-Henning Kamp 	char name[20];
1905c97ca54SIan Dowse 	struct proc *procp;
1916f4f00f1SPoul-Henning Kamp 	struct g_geom *gp;
1926f4f00f1SPoul-Henning Kamp 	struct g_provider *pp;
1939b00ca19SPoul-Henning Kamp 	int (*start)(struct md_s *sc, struct bio *bp);
194a03be42dSMaxim Sobolev 	struct devstat *devstat;
19595f1a897SPoul-Henning Kamp 
19695f1a897SPoul-Henning Kamp 	/* MD_MALLOC related fields */
197c6517568SPoul-Henning Kamp 	struct indir *indir;
198f43b2bacSPoul-Henning Kamp 	uma_zone_t uma;
19900a6a3c6SPoul-Henning Kamp 
20095f1a897SPoul-Henning Kamp 	/* MD_PRELOAD related fields */
20195f1a897SPoul-Henning Kamp 	u_char *pl_ptr;
202b830359bSPawel Jakub Dawidek 	size_t pl_len;
20300a6a3c6SPoul-Henning Kamp 
2048f8def9eSPoul-Henning Kamp 	/* MD_VNODE related fields */
2058f8def9eSPoul-Henning Kamp 	struct vnode *vnode;
20661a6eb62SPawel Jakub Dawidek 	char file[PATH_MAX];
2078f8def9eSPoul-Henning Kamp 	struct ucred *cred;
2088f8def9eSPoul-Henning Kamp 
209e0cebb40SDima Dorfman 	/* MD_SWAP related fields */
2108f8def9eSPoul-Henning Kamp 	vm_object_t object;
2118f8def9eSPoul-Henning Kamp };
21200a6a3c6SPoul-Henning Kamp 
213c6517568SPoul-Henning Kamp static struct indir *
2148b149b51SJohn Baldwin new_indir(u_int shift)
215c6517568SPoul-Henning Kamp {
216c6517568SPoul-Henning Kamp 	struct indir *ip;
217c6517568SPoul-Henning Kamp 
218c44d423eSKonstantin Belousov 	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
219c44d423eSKonstantin Belousov 	    | M_ZERO);
220c6517568SPoul-Henning Kamp 	if (ip == NULL)
221c6517568SPoul-Henning Kamp 		return (NULL);
222c6517568SPoul-Henning Kamp 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
223c44d423eSKonstantin Belousov 	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
224c6517568SPoul-Henning Kamp 	if (ip->array == NULL) {
225c6517568SPoul-Henning Kamp 		free(ip, M_MD);
226c6517568SPoul-Henning Kamp 		return (NULL);
227c6517568SPoul-Henning Kamp 	}
228c6517568SPoul-Henning Kamp 	ip->total = NINDIR;
229c6517568SPoul-Henning Kamp 	ip->shift = shift;
230c6517568SPoul-Henning Kamp 	return (ip);
231c6517568SPoul-Henning Kamp }
232c6517568SPoul-Henning Kamp 
233c6517568SPoul-Henning Kamp static void
234c6517568SPoul-Henning Kamp del_indir(struct indir *ip)
235c6517568SPoul-Henning Kamp {
236c6517568SPoul-Henning Kamp 
237f43b2bacSPoul-Henning Kamp 	free(ip->array, M_MDSECT);
238c6517568SPoul-Henning Kamp 	free(ip, M_MD);
239c6517568SPoul-Henning Kamp }
240c6517568SPoul-Henning Kamp 
241f43b2bacSPoul-Henning Kamp static void
242f43b2bacSPoul-Henning Kamp destroy_indir(struct md_s *sc, struct indir *ip)
243f43b2bacSPoul-Henning Kamp {
244f43b2bacSPoul-Henning Kamp 	int i;
245f43b2bacSPoul-Henning Kamp 
246f43b2bacSPoul-Henning Kamp 	for (i = 0; i < NINDIR; i++) {
247f43b2bacSPoul-Henning Kamp 		if (!ip->array[i])
248f43b2bacSPoul-Henning Kamp 			continue;
249f43b2bacSPoul-Henning Kamp 		if (ip->shift)
250f43b2bacSPoul-Henning Kamp 			destroy_indir(sc, (struct indir*)(ip->array[i]));
251f43b2bacSPoul-Henning Kamp 		else if (ip->array[i] > 255)
252f43b2bacSPoul-Henning Kamp 			uma_zfree(sc->uma, (void *)(ip->array[i]));
253f43b2bacSPoul-Henning Kamp 	}
254f43b2bacSPoul-Henning Kamp 	del_indir(ip);
255f43b2bacSPoul-Henning Kamp }
256f43b2bacSPoul-Henning Kamp 
257c6517568SPoul-Henning Kamp /*
2586c3cd0e2SMaxim Konovalov  * This function does the math and allocates the top level "indir" structure
259c6517568SPoul-Henning Kamp  * for a device of "size" sectors.
260c6517568SPoul-Henning Kamp  */
261c6517568SPoul-Henning Kamp 
262c6517568SPoul-Henning Kamp static struct indir *
263c6517568SPoul-Henning Kamp dimension(off_t size)
264c6517568SPoul-Henning Kamp {
265c6517568SPoul-Henning Kamp 	off_t rcnt;
266c6517568SPoul-Henning Kamp 	struct indir *ip;
267d12fc952SKonstantin Belousov 	int layer;
268c6517568SPoul-Henning Kamp 
269c6517568SPoul-Henning Kamp 	rcnt = size;
270c6517568SPoul-Henning Kamp 	layer = 0;
271c6517568SPoul-Henning Kamp 	while (rcnt > NINDIR) {
272c6517568SPoul-Henning Kamp 		rcnt /= NINDIR;
273c6517568SPoul-Henning Kamp 		layer++;
274c6517568SPoul-Henning Kamp 	}
275c6517568SPoul-Henning Kamp 
276c6517568SPoul-Henning Kamp 	/*
277c6517568SPoul-Henning Kamp 	 * XXX: the top layer is probably not fully populated, so we allocate
27883e13864SPoul-Henning Kamp 	 * too much space for ip->array in here.
279c6517568SPoul-Henning Kamp 	 */
28083e13864SPoul-Henning Kamp 	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
28183e13864SPoul-Henning Kamp 	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
28283e13864SPoul-Henning Kamp 	    M_MDSECT, M_WAITOK | M_ZERO);
28383e13864SPoul-Henning Kamp 	ip->total = NINDIR;
28483e13864SPoul-Henning Kamp 	ip->shift = layer * nshift;
285c6517568SPoul-Henning Kamp 	return (ip);
286c6517568SPoul-Henning Kamp }
287c6517568SPoul-Henning Kamp 
288c6517568SPoul-Henning Kamp /*
289c6517568SPoul-Henning Kamp  * Read a given sector
290c6517568SPoul-Henning Kamp  */
291c6517568SPoul-Henning Kamp 
292c6517568SPoul-Henning Kamp static uintptr_t
293c6517568SPoul-Henning Kamp s_read(struct indir *ip, off_t offset)
294c6517568SPoul-Henning Kamp {
295c6517568SPoul-Henning Kamp 	struct indir *cip;
296c6517568SPoul-Henning Kamp 	int idx;
297c6517568SPoul-Henning Kamp 	uintptr_t up;
298c6517568SPoul-Henning Kamp 
299c6517568SPoul-Henning Kamp 	if (md_debug > 1)
3006569d6f3SMaxime Henrion 		printf("s_read(%jd)\n", (intmax_t)offset);
301c6517568SPoul-Henning Kamp 	up = 0;
302c6517568SPoul-Henning Kamp 	for (cip = ip; cip != NULL;) {
303c6517568SPoul-Henning Kamp 		if (cip->shift) {
304c6517568SPoul-Henning Kamp 			idx = (offset >> cip->shift) & NMASK;
305c6517568SPoul-Henning Kamp 			up = cip->array[idx];
306c6517568SPoul-Henning Kamp 			cip = (struct indir *)up;
307c6517568SPoul-Henning Kamp 			continue;
308c6517568SPoul-Henning Kamp 		}
309c6517568SPoul-Henning Kamp 		idx = offset & NMASK;
310c6517568SPoul-Henning Kamp 		return (cip->array[idx]);
311c6517568SPoul-Henning Kamp 	}
312c6517568SPoul-Henning Kamp 	return (0);
313c6517568SPoul-Henning Kamp }
314c6517568SPoul-Henning Kamp 
315c6517568SPoul-Henning Kamp /*
316c6517568SPoul-Henning Kamp  * Write a given sector, prune the tree if the value is 0
317c6517568SPoul-Henning Kamp  */
318c6517568SPoul-Henning Kamp 
319c6517568SPoul-Henning Kamp static int
320fde2a2e4SPoul-Henning Kamp s_write(struct indir *ip, off_t offset, uintptr_t ptr)
321c6517568SPoul-Henning Kamp {
322c6517568SPoul-Henning Kamp 	struct indir *cip, *lip[10];
323c6517568SPoul-Henning Kamp 	int idx, li;
324c6517568SPoul-Henning Kamp 	uintptr_t up;
325c6517568SPoul-Henning Kamp 
326c6517568SPoul-Henning Kamp 	if (md_debug > 1)
3276569d6f3SMaxime Henrion 		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
328c6517568SPoul-Henning Kamp 	up = 0;
329c6517568SPoul-Henning Kamp 	li = 0;
330c6517568SPoul-Henning Kamp 	cip = ip;
331c6517568SPoul-Henning Kamp 	for (;;) {
332c6517568SPoul-Henning Kamp 		lip[li++] = cip;
333c6517568SPoul-Henning Kamp 		if (cip->shift) {
334c6517568SPoul-Henning Kamp 			idx = (offset >> cip->shift) & NMASK;
335c6517568SPoul-Henning Kamp 			up = cip->array[idx];
336c6517568SPoul-Henning Kamp 			if (up != 0) {
337c6517568SPoul-Henning Kamp 				cip = (struct indir *)up;
338c6517568SPoul-Henning Kamp 				continue;
339c6517568SPoul-Henning Kamp 			}
340c6517568SPoul-Henning Kamp 			/* Allocate branch */
341c6517568SPoul-Henning Kamp 			cip->array[idx] =
342c6517568SPoul-Henning Kamp 			    (uintptr_t)new_indir(cip->shift - nshift);
343c6517568SPoul-Henning Kamp 			if (cip->array[idx] == 0)
344975b628fSPoul-Henning Kamp 				return (ENOSPC);
345c6517568SPoul-Henning Kamp 			cip->used++;
346c6517568SPoul-Henning Kamp 			up = cip->array[idx];
347c6517568SPoul-Henning Kamp 			cip = (struct indir *)up;
348c6517568SPoul-Henning Kamp 			continue;
349c6517568SPoul-Henning Kamp 		}
350c6517568SPoul-Henning Kamp 		/* leafnode */
351c6517568SPoul-Henning Kamp 		idx = offset & NMASK;
352c6517568SPoul-Henning Kamp 		up = cip->array[idx];
353c6517568SPoul-Henning Kamp 		if (up != 0)
354c6517568SPoul-Henning Kamp 			cip->used--;
355c6517568SPoul-Henning Kamp 		cip->array[idx] = ptr;
356c6517568SPoul-Henning Kamp 		if (ptr != 0)
357c6517568SPoul-Henning Kamp 			cip->used++;
358c6517568SPoul-Henning Kamp 		break;
359c6517568SPoul-Henning Kamp 	}
360c6517568SPoul-Henning Kamp 	if (cip->used != 0 || li == 1)
361c6517568SPoul-Henning Kamp 		return (0);
362c6517568SPoul-Henning Kamp 	li--;
363c6517568SPoul-Henning Kamp 	while (cip->used == 0 && cip != ip) {
364c6517568SPoul-Henning Kamp 		li--;
365c6517568SPoul-Henning Kamp 		idx = (offset >> lip[li]->shift) & NMASK;
366c6517568SPoul-Henning Kamp 		up = lip[li]->array[idx];
367c6517568SPoul-Henning Kamp 		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
368c6517568SPoul-Henning Kamp 		del_indir(cip);
3694a6a94d8SArchie Cobbs 		lip[li]->array[idx] = 0;
370c6517568SPoul-Henning Kamp 		lip[li]->used--;
371c6517568SPoul-Henning Kamp 		cip = lip[li];
372c6517568SPoul-Henning Kamp 	}
373c6517568SPoul-Henning Kamp 	return (0);
374c6517568SPoul-Henning Kamp }
375c6517568SPoul-Henning Kamp 
3766f4f00f1SPoul-Henning Kamp 
3776f4f00f1SPoul-Henning Kamp static int
3786f4f00f1SPoul-Henning Kamp g_md_access(struct g_provider *pp, int r, int w, int e)
3796f4f00f1SPoul-Henning Kamp {
3806f4f00f1SPoul-Henning Kamp 	struct md_s *sc;
3816f4f00f1SPoul-Henning Kamp 
3826f4f00f1SPoul-Henning Kamp 	sc = pp->geom->softc;
38341c8b468SEdward Tomasz Napierala 	if (sc == NULL) {
38441c8b468SEdward Tomasz Napierala 		if (r <= 0 && w <= 0 && e <= 0)
38541c8b468SEdward Tomasz Napierala 			return (0);
3866b60a2cdSPoul-Henning Kamp 		return (ENXIO);
38741c8b468SEdward Tomasz Napierala 	}
3886f4f00f1SPoul-Henning Kamp 	r += pp->acr;
3896f4f00f1SPoul-Henning Kamp 	w += pp->acw;
3906f4f00f1SPoul-Henning Kamp 	e += pp->ace;
39186776891SChristian S.J. Peron 	if ((sc->flags & MD_READONLY) != 0 && w > 0)
39286776891SChristian S.J. Peron 		return (EROFS);
3936f4f00f1SPoul-Henning Kamp 	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
3946f4f00f1SPoul-Henning Kamp 		sc->opencount = 1;
3956f4f00f1SPoul-Henning Kamp 	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
3966f4f00f1SPoul-Henning Kamp 		sc->opencount = 0;
3976f4f00f1SPoul-Henning Kamp 	}
3986f4f00f1SPoul-Henning Kamp 	return (0);
3996f4f00f1SPoul-Henning Kamp }
4006f4f00f1SPoul-Henning Kamp 
4016f4f00f1SPoul-Henning Kamp static void
4026f4f00f1SPoul-Henning Kamp g_md_start(struct bio *bp)
4036f4f00f1SPoul-Henning Kamp {
4046f4f00f1SPoul-Henning Kamp 	struct md_s *sc;
4056f4f00f1SPoul-Henning Kamp 
4066f4f00f1SPoul-Henning Kamp 	sc = bp->bio_to->geom->softc;
407a03be42dSMaxim Sobolev 	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
408a03be42dSMaxim Sobolev 		devstat_start_transaction_bio(sc->devstat, bp);
4090f8500a5SPoul-Henning Kamp 	mtx_lock(&sc->queue_mtx);
410891619a6SPoul-Henning Kamp 	bioq_disksort(&sc->bio_queue, bp);
4114b07ede4SPawel Jakub Dawidek 	mtx_unlock(&sc->queue_mtx);
412e4cdd0d4SPawel Jakub Dawidek 	wakeup(sc);
4136f4f00f1SPoul-Henning Kamp }
4146f4f00f1SPoul-Henning Kamp 
415b4a4f93cSPoul-Henning Kamp static int
416b4a4f93cSPoul-Henning Kamp mdstart_malloc(struct md_s *sc, struct bio *bp)
41700a6a3c6SPoul-Henning Kamp {
418c6517568SPoul-Henning Kamp 	int i, error;
419c6517568SPoul-Henning Kamp 	u_char *dst;
420b830359bSPawel Jakub Dawidek 	off_t secno, nsec, uc;
421c6517568SPoul-Henning Kamp 	uintptr_t sp, osp;
42200a6a3c6SPoul-Henning Kamp 
4235541f25eSPawel Jakub Dawidek 	switch (bp->bio_cmd) {
4245541f25eSPawel Jakub Dawidek 	case BIO_READ:
4255541f25eSPawel Jakub Dawidek 	case BIO_WRITE:
4265541f25eSPawel Jakub Dawidek 	case BIO_DELETE:
4275541f25eSPawel Jakub Dawidek 		break;
4285541f25eSPawel Jakub Dawidek 	default:
4295541f25eSPawel Jakub Dawidek 		return (EOPNOTSUPP);
4305541f25eSPawel Jakub Dawidek 	}
4315541f25eSPawel Jakub Dawidek 
432b830359bSPawel Jakub Dawidek 	nsec = bp->bio_length / sc->sectorsize;
433b830359bSPawel Jakub Dawidek 	secno = bp->bio_offset / sc->sectorsize;
4348177437dSPoul-Henning Kamp 	dst = bp->bio_data;
435c6517568SPoul-Henning Kamp 	error = 0;
43600a6a3c6SPoul-Henning Kamp 	while (nsec--) {
437fde2a2e4SPoul-Henning Kamp 		osp = s_read(sc->indir, secno);
4388177437dSPoul-Henning Kamp 		if (bp->bio_cmd == BIO_DELETE) {
439fde2a2e4SPoul-Henning Kamp 			if (osp != 0)
440fde2a2e4SPoul-Henning Kamp 				error = s_write(sc->indir, secno, 0);
4418177437dSPoul-Henning Kamp 		} else if (bp->bio_cmd == BIO_READ) {
442fde2a2e4SPoul-Henning Kamp 			if (osp == 0)
443b830359bSPawel Jakub Dawidek 				bzero(dst, sc->sectorsize);
444fde2a2e4SPoul-Henning Kamp 			else if (osp <= 255)
445dbb95048SMarcel Moolenaar 				memset(dst, osp, sc->sectorsize);
446dbb95048SMarcel Moolenaar 			else {
447b830359bSPawel Jakub Dawidek 				bcopy((void *)osp, dst, sc->sectorsize);
448dbb95048SMarcel Moolenaar 				cpu_flush_dcache(dst, sc->sectorsize);
449dbb95048SMarcel Moolenaar 			}
450fde2a2e4SPoul-Henning Kamp 			osp = 0;
451c6517568SPoul-Henning Kamp 		} else if (bp->bio_cmd == BIO_WRITE) {
4528f8def9eSPoul-Henning Kamp 			if (sc->flags & MD_COMPRESS) {
45300a6a3c6SPoul-Henning Kamp 				uc = dst[0];
454b830359bSPawel Jakub Dawidek 				for (i = 1; i < sc->sectorsize; i++)
45500a6a3c6SPoul-Henning Kamp 					if (dst[i] != uc)
45600a6a3c6SPoul-Henning Kamp 						break;
4578f8def9eSPoul-Henning Kamp 			} else {
4588f8def9eSPoul-Henning Kamp 				i = 0;
4598f8def9eSPoul-Henning Kamp 				uc = 0;
4608f8def9eSPoul-Henning Kamp 			}
461b830359bSPawel Jakub Dawidek 			if (i == sc->sectorsize) {
462fde2a2e4SPoul-Henning Kamp 				if (osp != uc)
463fde2a2e4SPoul-Henning Kamp 					error = s_write(sc->indir, secno, uc);
46400a6a3c6SPoul-Henning Kamp 			} else {
465fde2a2e4SPoul-Henning Kamp 				if (osp <= 255) {
466b830359bSPawel Jakub Dawidek 					sp = (uintptr_t)uma_zalloc(sc->uma,
467c44d423eSKonstantin Belousov 					    md_malloc_wait ? M_WAITOK :
468b830359bSPawel Jakub Dawidek 					    M_NOWAIT);
469c6517568SPoul-Henning Kamp 					if (sp == 0) {
470fde2a2e4SPoul-Henning Kamp 						error = ENOSPC;
471fde2a2e4SPoul-Henning Kamp 						break;
472fde2a2e4SPoul-Henning Kamp 					}
473b830359bSPawel Jakub Dawidek 					bcopy(dst, (void *)sp, sc->sectorsize);
474fde2a2e4SPoul-Henning Kamp 					error = s_write(sc->indir, secno, sp);
475c6517568SPoul-Henning Kamp 				} else {
476b830359bSPawel Jakub Dawidek 					bcopy(dst, (void *)osp, sc->sectorsize);
477fde2a2e4SPoul-Henning Kamp 					osp = 0;
47800a6a3c6SPoul-Henning Kamp 				}
47900a6a3c6SPoul-Henning Kamp 			}
480c6517568SPoul-Henning Kamp 		} else {
481c6517568SPoul-Henning Kamp 			error = EOPNOTSUPP;
482c6517568SPoul-Henning Kamp 		}
483c6517568SPoul-Henning Kamp 		if (osp > 255)
484f43b2bacSPoul-Henning Kamp 			uma_zfree(sc->uma, (void*)osp);
485e3ed29a7SPawel Jakub Dawidek 		if (error != 0)
486c6517568SPoul-Henning Kamp 			break;
48700a6a3c6SPoul-Henning Kamp 		secno++;
488b830359bSPawel Jakub Dawidek 		dst += sc->sectorsize;
48900a6a3c6SPoul-Henning Kamp 	}
4908177437dSPoul-Henning Kamp 	bp->bio_resid = 0;
491c6517568SPoul-Henning Kamp 	return (error);
49200a6a3c6SPoul-Henning Kamp }
49300a6a3c6SPoul-Henning Kamp 
494b4a4f93cSPoul-Henning Kamp static int
495b4a4f93cSPoul-Henning Kamp mdstart_preload(struct md_s *sc, struct bio *bp)
49671e4fff8SPoul-Henning Kamp {
49771e4fff8SPoul-Henning Kamp 
498a8a58d03SPawel Jakub Dawidek 	switch (bp->bio_cmd) {
499a8a58d03SPawel Jakub Dawidek 	case BIO_READ:
500a8a58d03SPawel Jakub Dawidek 		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
501a8a58d03SPawel Jakub Dawidek 		    bp->bio_length);
502dbb95048SMarcel Moolenaar 		cpu_flush_dcache(bp->bio_data, bp->bio_length);
503a8a58d03SPawel Jakub Dawidek 		break;
504a8a58d03SPawel Jakub Dawidek 	case BIO_WRITE:
505a8a58d03SPawel Jakub Dawidek 		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
506a8a58d03SPawel Jakub Dawidek 		    bp->bio_length);
507a8a58d03SPawel Jakub Dawidek 		break;
50871e4fff8SPoul-Henning Kamp 	}
5098177437dSPoul-Henning Kamp 	bp->bio_resid = 0;
510b4a4f93cSPoul-Henning Kamp 	return (0);
51171e4fff8SPoul-Henning Kamp }
51271e4fff8SPoul-Henning Kamp 
513b4a4f93cSPoul-Henning Kamp static int
514b4a4f93cSPoul-Henning Kamp mdstart_vnode(struct md_s *sc, struct bio *bp)
5158f8def9eSPoul-Henning Kamp {
516a08d2e7fSJohn Baldwin 	int error, vfslocked;
5178f8def9eSPoul-Henning Kamp 	struct uio auio;
5188f8def9eSPoul-Henning Kamp 	struct iovec aiov;
5198f8def9eSPoul-Henning Kamp 	struct mount *mp;
5205541f25eSPawel Jakub Dawidek 	struct vnode *vp;
5215541f25eSPawel Jakub Dawidek 	struct thread *td;
5220abd21bdSDag-Erling Smørgrav 	off_t end, zerosize;
5235541f25eSPawel Jakub Dawidek 
5245541f25eSPawel Jakub Dawidek 	switch (bp->bio_cmd) {
5255541f25eSPawel Jakub Dawidek 	case BIO_READ:
5265541f25eSPawel Jakub Dawidek 	case BIO_WRITE:
5270abd21bdSDag-Erling Smørgrav 	case BIO_DELETE:
5285541f25eSPawel Jakub Dawidek 	case BIO_FLUSH:
5295541f25eSPawel Jakub Dawidek 		break;
5305541f25eSPawel Jakub Dawidek 	default:
5315541f25eSPawel Jakub Dawidek 		return (EOPNOTSUPP);
5325541f25eSPawel Jakub Dawidek 	}
5335541f25eSPawel Jakub Dawidek 
5345541f25eSPawel Jakub Dawidek 	td = curthread;
5355541f25eSPawel Jakub Dawidek 	vp = sc->vnode;
5368f8def9eSPoul-Henning Kamp 
5378f8def9eSPoul-Henning Kamp 	/*
5388f8def9eSPoul-Henning Kamp 	 * VNODE I/O
5398f8def9eSPoul-Henning Kamp 	 *
5408f8def9eSPoul-Henning Kamp 	 * If an error occurs, we set BIO_ERROR but we do not set
5418f8def9eSPoul-Henning Kamp 	 * B_INVAL because (for a write anyway), the buffer is
5428f8def9eSPoul-Henning Kamp 	 * still valid.
5438f8def9eSPoul-Henning Kamp 	 */
5448f8def9eSPoul-Henning Kamp 
5455541f25eSPawel Jakub Dawidek 	if (bp->bio_cmd == BIO_FLUSH) {
5465541f25eSPawel Jakub Dawidek 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
5475541f25eSPawel Jakub Dawidek 		(void) vn_start_write(vp, &mp, V_WAIT);
548cb05b60aSAttilio Rao 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5495541f25eSPawel Jakub Dawidek 		error = VOP_FSYNC(vp, MNT_WAIT, td);
55022db15c0SAttilio Rao 		VOP_UNLOCK(vp, 0);
5515541f25eSPawel Jakub Dawidek 		vn_finished_write(mp);
5525541f25eSPawel Jakub Dawidek 		VFS_UNLOCK_GIANT(vfslocked);
5535541f25eSPawel Jakub Dawidek 		return (error);
5545541f25eSPawel Jakub Dawidek 	}
5555541f25eSPawel Jakub Dawidek 
5568f8def9eSPoul-Henning Kamp 	bzero(&auio, sizeof(auio));
5578f8def9eSPoul-Henning Kamp 
5580abd21bdSDag-Erling Smørgrav 	/*
5590abd21bdSDag-Erling Smørgrav 	 * Special case for BIO_DELETE.  On the surface, this is very
5600abd21bdSDag-Erling Smørgrav 	 * similar to BIO_WRITE, except that we write from our own
5610abd21bdSDag-Erling Smørgrav 	 * fixed-length buffer, so we have to loop.  The net result is
5620abd21bdSDag-Erling Smørgrav 	 * that the two cases end up having very little in common.
5630abd21bdSDag-Erling Smørgrav 	 */
5640abd21bdSDag-Erling Smørgrav 	if (bp->bio_cmd == BIO_DELETE) {
56589cb2a19SMatthew D Fleming 		zerosize = ZERO_REGION_SIZE -
56689cb2a19SMatthew D Fleming 		    (ZERO_REGION_SIZE % sc->sectorsize);
5670abd21bdSDag-Erling Smørgrav 		auio.uio_iov = &aiov;
5680abd21bdSDag-Erling Smørgrav 		auio.uio_iovcnt = 1;
5690abd21bdSDag-Erling Smørgrav 		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
5700abd21bdSDag-Erling Smørgrav 		auio.uio_segflg = UIO_SYSSPACE;
5710abd21bdSDag-Erling Smørgrav 		auio.uio_rw = UIO_WRITE;
5720abd21bdSDag-Erling Smørgrav 		auio.uio_td = td;
5730abd21bdSDag-Erling Smørgrav 		end = bp->bio_offset + bp->bio_length;
5740abd21bdSDag-Erling Smørgrav 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
5750abd21bdSDag-Erling Smørgrav 		(void) vn_start_write(vp, &mp, V_WAIT);
5760abd21bdSDag-Erling Smørgrav 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5770abd21bdSDag-Erling Smørgrav 		error = 0;
5780abd21bdSDag-Erling Smørgrav 		while (auio.uio_offset < end) {
57989cb2a19SMatthew D Fleming 			aiov.iov_base = __DECONST(void *, zero_region);
5800abd21bdSDag-Erling Smørgrav 			aiov.iov_len = end - auio.uio_offset;
5810abd21bdSDag-Erling Smørgrav 			if (aiov.iov_len > zerosize)
5820abd21bdSDag-Erling Smørgrav 				aiov.iov_len = zerosize;
5830abd21bdSDag-Erling Smørgrav 			auio.uio_resid = aiov.iov_len;
5840abd21bdSDag-Erling Smørgrav 			error = VOP_WRITE(vp, &auio,
5850abd21bdSDag-Erling Smørgrav 			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
5860abd21bdSDag-Erling Smørgrav 			if (error != 0)
5870abd21bdSDag-Erling Smørgrav 				break;
5880abd21bdSDag-Erling Smørgrav 		}
5890abd21bdSDag-Erling Smørgrav 		VOP_UNLOCK(vp, 0);
5900abd21bdSDag-Erling Smørgrav 		vn_finished_write(mp);
5910abd21bdSDag-Erling Smørgrav 		bp->bio_resid = end - auio.uio_offset;
5920abd21bdSDag-Erling Smørgrav 		VFS_UNLOCK_GIANT(vfslocked);
5930abd21bdSDag-Erling Smørgrav 		return (error);
5940abd21bdSDag-Erling Smørgrav 	}
5950abd21bdSDag-Erling Smørgrav 
5968f8def9eSPoul-Henning Kamp 	aiov.iov_base = bp->bio_data;
597a8a58d03SPawel Jakub Dawidek 	aiov.iov_len = bp->bio_length;
5988f8def9eSPoul-Henning Kamp 	auio.uio_iov = &aiov;
5998f8def9eSPoul-Henning Kamp 	auio.uio_iovcnt = 1;
600a8a58d03SPawel Jakub Dawidek 	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
6018f8def9eSPoul-Henning Kamp 	auio.uio_segflg = UIO_SYSSPACE;
6028f8def9eSPoul-Henning Kamp 	if (bp->bio_cmd == BIO_READ)
6038f8def9eSPoul-Henning Kamp 		auio.uio_rw = UIO_READ;
6048e28326aSPoul-Henning Kamp 	else if (bp->bio_cmd == BIO_WRITE)
6058f8def9eSPoul-Henning Kamp 		auio.uio_rw = UIO_WRITE;
6068e28326aSPoul-Henning Kamp 	else
6078e28326aSPoul-Henning Kamp 		panic("wrong BIO_OP in mdstart_vnode");
608a8a58d03SPawel Jakub Dawidek 	auio.uio_resid = bp->bio_length;
6095541f25eSPawel Jakub Dawidek 	auio.uio_td = td;
6107e76bb56SMatthew Dillon 	/*
6117e76bb56SMatthew Dillon 	 * When reading set IO_DIRECT to try to avoid double-caching
61217a13919SPoul-Henning Kamp 	 * the data.  When writing IO_DIRECT is not optimal.
6137e76bb56SMatthew Dillon 	 */
6145541f25eSPawel Jakub Dawidek 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
6158f8def9eSPoul-Henning Kamp 	if (bp->bio_cmd == BIO_READ) {
616cb05b60aSAttilio Rao 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
6175541f25eSPawel Jakub Dawidek 		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
61822db15c0SAttilio Rao 		VOP_UNLOCK(vp, 0);
6198f8def9eSPoul-Henning Kamp 	} else {
6205541f25eSPawel Jakub Dawidek 		(void) vn_start_write(vp, &mp, V_WAIT);
621cb05b60aSAttilio Rao 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
6225541f25eSPawel Jakub Dawidek 		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
6235541f25eSPawel Jakub Dawidek 		    sc->cred);
62422db15c0SAttilio Rao 		VOP_UNLOCK(vp, 0);
6258f8def9eSPoul-Henning Kamp 		vn_finished_write(mp);
6268f8def9eSPoul-Henning Kamp 	}
627a08d2e7fSJohn Baldwin 	VFS_UNLOCK_GIANT(vfslocked);
6288f8def9eSPoul-Henning Kamp 	bp->bio_resid = auio.uio_resid;
629b4a4f93cSPoul-Henning Kamp 	return (error);
6308f8def9eSPoul-Henning Kamp }
6318f8def9eSPoul-Henning Kamp 
632b4a4f93cSPoul-Henning Kamp static int
633b4a4f93cSPoul-Henning Kamp mdstart_swap(struct md_s *sc, struct bio *bp)
6348f8def9eSPoul-Henning Kamp {
6357cd53fddSAlan Cox 	struct sf_buf *sf;
6366ab0a0aeSPawel Jakub Dawidek 	int rv, offs, len, lastend;
6376ab0a0aeSPawel Jakub Dawidek 	vm_pindex_t i, lastp;
6388e28326aSPoul-Henning Kamp 	vm_page_t m;
6398e28326aSPoul-Henning Kamp 	u_char *p;
6408f8def9eSPoul-Henning Kamp 
6415541f25eSPawel Jakub Dawidek 	switch (bp->bio_cmd) {
6425541f25eSPawel Jakub Dawidek 	case BIO_READ:
6435541f25eSPawel Jakub Dawidek 	case BIO_WRITE:
6445541f25eSPawel Jakub Dawidek 	case BIO_DELETE:
6455541f25eSPawel Jakub Dawidek 		break;
6465541f25eSPawel Jakub Dawidek 	default:
6475541f25eSPawel Jakub Dawidek 		return (EOPNOTSUPP);
6485541f25eSPawel Jakub Dawidek 	}
6495541f25eSPawel Jakub Dawidek 
6508e28326aSPoul-Henning Kamp 	p = bp->bio_data;
651e07113d6SColin Percival 
652e07113d6SColin Percival 	/*
6536c3cd0e2SMaxim Konovalov 	 * offs is the offset at which to start operating on the
654e07113d6SColin Percival 	 * next (ie, first) page.  lastp is the last page on
655e07113d6SColin Percival 	 * which we're going to operate.  lastend is the ending
656e07113d6SColin Percival 	 * position within that last page (ie, PAGE_SIZE if
657e07113d6SColin Percival 	 * we're operating on complete aligned pages).
658e07113d6SColin Percival 	 */
659e07113d6SColin Percival 	offs = bp->bio_offset % PAGE_SIZE;
660e07113d6SColin Percival 	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
661e07113d6SColin Percival 	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
662e07113d6SColin Percival 
663812851b6SBrian Feldman 	rv = VM_PAGER_OK;
6648e28326aSPoul-Henning Kamp 	VM_OBJECT_LOCK(sc->object);
6658e28326aSPoul-Henning Kamp 	vm_object_pip_add(sc->object, 1);
666e07113d6SColin Percival 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
667e07113d6SColin Percival 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
668e07113d6SColin Percival 
669e07113d6SColin Percival 		m = vm_page_grab(sc->object, i,
6708e28326aSPoul-Henning Kamp 		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
6717cd53fddSAlan Cox 		VM_OBJECT_UNLOCK(sc->object);
672e340fc60SAlan Cox 		sched_pin();
673e340fc60SAlan Cox 		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
6747cd53fddSAlan Cox 		VM_OBJECT_LOCK(sc->object);
6758e28326aSPoul-Henning Kamp 		if (bp->bio_cmd == BIO_READ) {
67607be617fSAlan Cox 			if (m->valid != VM_PAGE_BITS_ALL)
67707be617fSAlan Cox 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
678812851b6SBrian Feldman 			if (rv == VM_PAGER_ERROR) {
679812851b6SBrian Feldman 				sf_buf_free(sf);
680e340fc60SAlan Cox 				sched_unpin();
681812851b6SBrian Feldman 				vm_page_wakeup(m);
682812851b6SBrian Feldman 				break;
683812851b6SBrian Feldman 			}
6847cd53fddSAlan Cox 			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
685dbb95048SMarcel Moolenaar 			cpu_flush_dcache(p, len);
6868e28326aSPoul-Henning Kamp 		} else if (bp->bio_cmd == BIO_WRITE) {
68707be617fSAlan Cox 			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
68807be617fSAlan Cox 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
689812851b6SBrian Feldman 			if (rv == VM_PAGER_ERROR) {
690812851b6SBrian Feldman 				sf_buf_free(sf);
691e340fc60SAlan Cox 				sched_unpin();
692812851b6SBrian Feldman 				vm_page_wakeup(m);
693812851b6SBrian Feldman 				break;
694812851b6SBrian Feldman 			}
6957cd53fddSAlan Cox 			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
6968e28326aSPoul-Henning Kamp 			m->valid = VM_PAGE_BITS_ALL;
6978e28326aSPoul-Henning Kamp 		} else if (bp->bio_cmd == BIO_DELETE) {
69807be617fSAlan Cox 			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
69907be617fSAlan Cox 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
700812851b6SBrian Feldman 			if (rv == VM_PAGER_ERROR) {
701812851b6SBrian Feldman 				sf_buf_free(sf);
702e340fc60SAlan Cox 				sched_unpin();
703812851b6SBrian Feldman 				vm_page_wakeup(m);
704812851b6SBrian Feldman 				break;
705812851b6SBrian Feldman 			}
7064a13a769SKonstantin Belousov 			if (len != PAGE_SIZE) {
7077cd53fddSAlan Cox 				bzero((void *)(sf_buf_kva(sf) + offs), len);
7084a13a769SKonstantin Belousov 				vm_page_clear_dirty(m, offs, len);
7098e28326aSPoul-Henning Kamp 				m->valid = VM_PAGE_BITS_ALL;
7104a13a769SKonstantin Belousov 			} else
7114a13a769SKonstantin Belousov 				vm_pager_page_unswapped(m);
7128e28326aSPoul-Henning Kamp 		}
7137cd53fddSAlan Cox 		sf_buf_free(sf);
714e340fc60SAlan Cox 		sched_unpin();
7158e28326aSPoul-Henning Kamp 		vm_page_wakeup(m);
716fc0c3802SKonstantin Belousov 		vm_page_lock(m);
7174a13a769SKonstantin Belousov 		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
7184a13a769SKonstantin Belousov 			vm_page_free(m);
7194a13a769SKonstantin Belousov 		else
7208e28326aSPoul-Henning Kamp 			vm_page_activate(m);
721ecd5dd95SAlan Cox 		vm_page_unlock(m);
72207be617fSAlan Cox 		if (bp->bio_cmd == BIO_WRITE)
7238e28326aSPoul-Henning Kamp 			vm_page_dirty(m);
724e07113d6SColin Percival 
725e07113d6SColin Percival 		/* Actions on further pages start at offset 0 */
726e07113d6SColin Percival 		p += PAGE_SIZE - offs;
727e07113d6SColin Percival 		offs = 0;
7288e28326aSPoul-Henning Kamp #if 0
729e07113d6SColin Percival if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
7308e28326aSPoul-Henning Kamp printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
7318e28326aSPoul-Henning Kamp     m->wire_count, m->busy,
732e07113d6SColin Percival     m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
7338e28326aSPoul-Henning Kamp #endif
7348e28326aSPoul-Henning Kamp 	}
7358e28326aSPoul-Henning Kamp 	vm_object_pip_subtract(sc->object, 1);
7368e28326aSPoul-Henning Kamp 	VM_OBJECT_UNLOCK(sc->object);
737812851b6SBrian Feldman 	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
7388e28326aSPoul-Henning Kamp }
7398f8def9eSPoul-Henning Kamp 
7408f8def9eSPoul-Henning Kamp static void
7415c97ca54SIan Dowse md_kthread(void *arg)
7425c97ca54SIan Dowse {
7435c97ca54SIan Dowse 	struct md_s *sc;
7445c97ca54SIan Dowse 	struct bio *bp;
745a08d2e7fSJohn Baldwin 	int error;
7465c97ca54SIan Dowse 
7475c97ca54SIan Dowse 	sc = arg;
748982d11f8SJeff Roberson 	thread_lock(curthread);
74963710c4dSJohn Baldwin 	sched_prio(curthread, PRIBIO);
750982d11f8SJeff Roberson 	thread_unlock(curthread);
7513b7b5496SKonstantin Belousov 	if (sc->type == MD_VNODE)
7523b7b5496SKonstantin Belousov 		curthread->td_pflags |= TDP_NORUNNINGBUF;
7535c97ca54SIan Dowse 
754b4a4f93cSPoul-Henning Kamp 	for (;;) {
755a08d2e7fSJohn Baldwin 		mtx_lock(&sc->queue_mtx);
7565c97ca54SIan Dowse 		if (sc->flags & MD_SHUTDOWN) {
757a08d2e7fSJohn Baldwin 			sc->flags |= MD_EXITING;
758a08d2e7fSJohn Baldwin 			mtx_unlock(&sc->queue_mtx);
7593745c395SJulian Elischer 			kproc_exit(0);
7605c97ca54SIan Dowse 		}
7619b00ca19SPoul-Henning Kamp 		bp = bioq_takefirst(&sc->bio_queue);
7629b00ca19SPoul-Henning Kamp 		if (!bp) {
7630f8500a5SPoul-Henning Kamp 			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
7645c97ca54SIan Dowse 			continue;
7655c97ca54SIan Dowse 		}
7660f8500a5SPoul-Henning Kamp 		mtx_unlock(&sc->queue_mtx);
7674e8bfe14SPoul-Henning Kamp 		if (bp->bio_cmd == BIO_GETATTR) {
768d91e813cSKonstantin Belousov 			if ((sc->fwsectors && sc->fwheads &&
7694e8bfe14SPoul-Henning Kamp 			    (g_handleattr_int(bp, "GEOM::fwsectors",
7704e8bfe14SPoul-Henning Kamp 			    sc->fwsectors) ||
7714e8bfe14SPoul-Henning Kamp 			    g_handleattr_int(bp, "GEOM::fwheads",
772d91e813cSKonstantin Belousov 			    sc->fwheads))) ||
773d91e813cSKonstantin Belousov 			    g_handleattr_int(bp, "GEOM::candelete", 1))
7744e8bfe14SPoul-Henning Kamp 				error = -1;
7754e8bfe14SPoul-Henning Kamp 			else
7764e8bfe14SPoul-Henning Kamp 				error = EOPNOTSUPP;
7774e8bfe14SPoul-Henning Kamp 		} else {
7789b00ca19SPoul-Henning Kamp 			error = sc->start(sc, bp);
7794e8bfe14SPoul-Henning Kamp 		}
780b4a4f93cSPoul-Henning Kamp 
7816f4f00f1SPoul-Henning Kamp 		if (error != -1) {
7826f4f00f1SPoul-Henning Kamp 			bp->bio_completed = bp->bio_length;
783a03be42dSMaxim Sobolev 			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
784a03be42dSMaxim Sobolev 				devstat_end_transaction_bio(sc->devstat, bp);
78596410b95SKonstantin Belousov 			g_io_deliver(bp, error);
786b4a4f93cSPoul-Henning Kamp 		}
7878f8def9eSPoul-Henning Kamp 	}
78826d48b40SPoul-Henning Kamp }
7898f8def9eSPoul-Henning Kamp 
7908f8def9eSPoul-Henning Kamp static struct md_s *
7918f8def9eSPoul-Henning Kamp mdfind(int unit)
7928f8def9eSPoul-Henning Kamp {
7938f8def9eSPoul-Henning Kamp 	struct md_s *sc;
7948f8def9eSPoul-Henning Kamp 
7953f54a085SPoul-Henning Kamp 	LIST_FOREACH(sc, &md_softc_list, list) {
7963f54a085SPoul-Henning Kamp 		if (sc->unit == unit)
7978f8def9eSPoul-Henning Kamp 			break;
7988f8def9eSPoul-Henning Kamp 	}
7998f8def9eSPoul-Henning Kamp 	return (sc);
8008f8def9eSPoul-Henning Kamp }
8018f8def9eSPoul-Henning Kamp 
8028f8def9eSPoul-Henning Kamp static struct md_s *
803947fc8deSPoul-Henning Kamp mdnew(int unit, int *errp, enum md_types type)
8048f8def9eSPoul-Henning Kamp {
805f4e7c5a8SJaakko Heinonen 	struct md_s *sc;
806f4e7c5a8SJaakko Heinonen 	int error;
8078f8def9eSPoul-Henning Kamp 
8089b00ca19SPoul-Henning Kamp 	*errp = 0;
809f4e7c5a8SJaakko Heinonen 	if (unit == -1)
810f4e7c5a8SJaakko Heinonen 		unit = alloc_unr(md_uh);
811f4e7c5a8SJaakko Heinonen 	else
812f4e7c5a8SJaakko Heinonen 		unit = alloc_unr_specific(md_uh, unit);
813f4e7c5a8SJaakko Heinonen 
814f4e7c5a8SJaakko Heinonen 	if (unit == -1) {
8157ee3c044SPawel Jakub Dawidek 		*errp = EBUSY;
8163f54a085SPoul-Henning Kamp 		return (NULL);
8173f54a085SPoul-Henning Kamp 	}
818f4e7c5a8SJaakko Heinonen 
8199b00ca19SPoul-Henning Kamp 	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
820947fc8deSPoul-Henning Kamp 	sc->type = type;
8219b00ca19SPoul-Henning Kamp 	bioq_init(&sc->bio_queue);
8229b00ca19SPoul-Henning Kamp 	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
8233f54a085SPoul-Henning Kamp 	sc->unit = unit;
824f43b2bacSPoul-Henning Kamp 	sprintf(sc->name, "md%d", unit);
8257ee3c044SPawel Jakub Dawidek 	LIST_INSERT_HEAD(&md_softc_list, sc, list);
8263745c395SJulian Elischer 	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
8279b00ca19SPoul-Henning Kamp 	if (error == 0)
8289b00ca19SPoul-Henning Kamp 		return (sc);
8297ee3c044SPawel Jakub Dawidek 	LIST_REMOVE(sc, list);
8307ee3c044SPawel Jakub Dawidek 	mtx_destroy(&sc->queue_mtx);
831f4e7c5a8SJaakko Heinonen 	free_unr(md_uh, sc->unit);
8325c97ca54SIan Dowse 	free(sc, M_MD);
8337ee3c044SPawel Jakub Dawidek 	*errp = error;
8345c97ca54SIan Dowse 	return (NULL);
8355c97ca54SIan Dowse }
8368f8def9eSPoul-Henning Kamp 
8378f8def9eSPoul-Henning Kamp static void
8388f8def9eSPoul-Henning Kamp mdinit(struct md_s *sc)
8398f8def9eSPoul-Henning Kamp {
8406f4f00f1SPoul-Henning Kamp 	struct g_geom *gp;
8416f4f00f1SPoul-Henning Kamp 	struct g_provider *pp;
8426f4f00f1SPoul-Henning Kamp 
8436f4f00f1SPoul-Henning Kamp 	g_topology_lock();
8446f4f00f1SPoul-Henning Kamp 	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
8456f4f00f1SPoul-Henning Kamp 	gp->softc = sc;
8466f4f00f1SPoul-Henning Kamp 	pp = g_new_providerf(gp, "md%d", sc->unit);
847b830359bSPawel Jakub Dawidek 	pp->mediasize = sc->mediasize;
848b830359bSPawel Jakub Dawidek 	pp->sectorsize = sc->sectorsize;
8496f4f00f1SPoul-Henning Kamp 	sc->gp = gp;
8506f4f00f1SPoul-Henning Kamp 	sc->pp = pp;
8516f4f00f1SPoul-Henning Kamp 	g_error_provider(pp, 0);
8526f4f00f1SPoul-Henning Kamp 	g_topology_unlock();
853a03be42dSMaxim Sobolev 	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
854a03be42dSMaxim Sobolev 	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
8556f4f00f1SPoul-Henning Kamp }
85671e4fff8SPoul-Henning Kamp 
85796b6a55fSPoul-Henning Kamp /*
85896b6a55fSPoul-Henning Kamp  * XXX: we should check that the range they feed us is mapped.
85996b6a55fSPoul-Henning Kamp  * XXX: we should implement read-only.
86096b6a55fSPoul-Henning Kamp  */
86196b6a55fSPoul-Henning Kamp 
862637f671aSPoul-Henning Kamp static int
863b830359bSPawel Jakub Dawidek mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
86471e4fff8SPoul-Henning Kamp {
86571e4fff8SPoul-Henning Kamp 
866b830359bSPawel Jakub Dawidek 	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
867637f671aSPoul-Henning Kamp 		return (EINVAL);
8685ed1eb2bSEdward Tomasz Napierala 	if (mdio->md_base == 0)
8695ed1eb2bSEdward Tomasz Napierala 		return (EINVAL);
87026a0ee75SDima Dorfman 	sc->flags = mdio->md_options & MD_FORCE;
87196b6a55fSPoul-Henning Kamp 	/* Cast to pointer size, then to pointer to avoid warning */
872dc57d7c6SPeter Wemm 	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
873b830359bSPawel Jakub Dawidek 	sc->pl_len = (size_t)sc->mediasize;
874637f671aSPoul-Henning Kamp 	return (0);
87595f1a897SPoul-Henning Kamp }
87695f1a897SPoul-Henning Kamp 
877637f671aSPoul-Henning Kamp 
8788f8def9eSPoul-Henning Kamp static int
879b830359bSPawel Jakub Dawidek mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
88095f1a897SPoul-Henning Kamp {
881c6517568SPoul-Henning Kamp 	uintptr_t sp;
882c6517568SPoul-Henning Kamp 	int error;
883b830359bSPawel Jakub Dawidek 	off_t u;
88495f1a897SPoul-Henning Kamp 
885c6517568SPoul-Henning Kamp 	error = 0;
8868f8def9eSPoul-Henning Kamp 	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
8878f8def9eSPoul-Henning Kamp 		return (EINVAL);
888b830359bSPawel Jakub Dawidek 	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
889ebe789d6SPoul-Henning Kamp 		return (EINVAL);
8908f8def9eSPoul-Henning Kamp 	/* Compression doesn't make sense if we have reserved space */
8918f8def9eSPoul-Henning Kamp 	if (mdio->md_options & MD_RESERVE)
8928f8def9eSPoul-Henning Kamp 		mdio->md_options &= ~MD_COMPRESS;
8934e8bfe14SPoul-Henning Kamp 	if (mdio->md_fwsectors != 0)
8944e8bfe14SPoul-Henning Kamp 		sc->fwsectors = mdio->md_fwsectors;
8954e8bfe14SPoul-Henning Kamp 	if (mdio->md_fwheads != 0)
8964e8bfe14SPoul-Henning Kamp 		sc->fwheads = mdio->md_fwheads;
89726a0ee75SDima Dorfman 	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
898b830359bSPawel Jakub Dawidek 	sc->indir = dimension(sc->mediasize / sc->sectorsize);
899b830359bSPawel Jakub Dawidek 	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
900b830359bSPawel Jakub Dawidek 	    0x1ff, 0);
90196b6a55fSPoul-Henning Kamp 	if (mdio->md_options & MD_RESERVE) {
902b830359bSPawel Jakub Dawidek 		off_t nsectors;
903b830359bSPawel Jakub Dawidek 
904b830359bSPawel Jakub Dawidek 		nsectors = sc->mediasize / sc->sectorsize;
905b830359bSPawel Jakub Dawidek 		for (u = 0; u < nsectors; u++) {
906007777f1SKonstantin Belousov 			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
907007777f1SKonstantin Belousov 			    M_WAITOK : M_NOWAIT) | M_ZERO);
908c6517568SPoul-Henning Kamp 			if (sp != 0)
909fde2a2e4SPoul-Henning Kamp 				error = s_write(sc->indir, u, sp);
910c6517568SPoul-Henning Kamp 			else
911c6517568SPoul-Henning Kamp 				error = ENOMEM;
912b830359bSPawel Jakub Dawidek 			if (error != 0)
913c6517568SPoul-Henning Kamp 				break;
9148f8def9eSPoul-Henning Kamp 		}
915c6517568SPoul-Henning Kamp 	}
916c6517568SPoul-Henning Kamp 	return (error);
91700a6a3c6SPoul-Henning Kamp }
91800a6a3c6SPoul-Henning Kamp 
9193f54a085SPoul-Henning Kamp 
9208f8def9eSPoul-Henning Kamp static int
9218f8def9eSPoul-Henning Kamp mdsetcred(struct md_s *sc, struct ucred *cred)
9228f8def9eSPoul-Henning Kamp {
9238f8def9eSPoul-Henning Kamp 	char *tmpbuf;
9248f8def9eSPoul-Henning Kamp 	int error = 0;
9258f8def9eSPoul-Henning Kamp 
9263f54a085SPoul-Henning Kamp 	/*
9278f8def9eSPoul-Henning Kamp 	 * Set credits in our softc
9283f54a085SPoul-Henning Kamp 	 */
9298f8def9eSPoul-Henning Kamp 
9308f8def9eSPoul-Henning Kamp 	if (sc->cred)
9318f8def9eSPoul-Henning Kamp 		crfree(sc->cred);
932bd78ceceSJohn Baldwin 	sc->cred = crhold(cred);
9338f8def9eSPoul-Henning Kamp 
9348f8def9eSPoul-Henning Kamp 	/*
9358f8def9eSPoul-Henning Kamp 	 * Horrible kludge to establish credentials for NFS  XXX.
9368f8def9eSPoul-Henning Kamp 	 */
9378f8def9eSPoul-Henning Kamp 
9388f8def9eSPoul-Henning Kamp 	if (sc->vnode) {
9398f8def9eSPoul-Henning Kamp 		struct uio auio;
9408f8def9eSPoul-Henning Kamp 		struct iovec aiov;
9418f8def9eSPoul-Henning Kamp 
942b830359bSPawel Jakub Dawidek 		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
9438f8def9eSPoul-Henning Kamp 		bzero(&auio, sizeof(auio));
9448f8def9eSPoul-Henning Kamp 
9458f8def9eSPoul-Henning Kamp 		aiov.iov_base = tmpbuf;
946b830359bSPawel Jakub Dawidek 		aiov.iov_len = sc->sectorsize;
9478f8def9eSPoul-Henning Kamp 		auio.uio_iov = &aiov;
9488f8def9eSPoul-Henning Kamp 		auio.uio_iovcnt = 1;
9498f8def9eSPoul-Henning Kamp 		auio.uio_offset = 0;
9508f8def9eSPoul-Henning Kamp 		auio.uio_rw = UIO_READ;
9518f8def9eSPoul-Henning Kamp 		auio.uio_segflg = UIO_SYSSPACE;
9528f8def9eSPoul-Henning Kamp 		auio.uio_resid = aiov.iov_len;
953cb05b60aSAttilio Rao 		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
9548f8def9eSPoul-Henning Kamp 		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
95522db15c0SAttilio Rao 		VOP_UNLOCK(sc->vnode, 0);
9568f8def9eSPoul-Henning Kamp 		free(tmpbuf, M_TEMP);
9578f8def9eSPoul-Henning Kamp 	}
9588f8def9eSPoul-Henning Kamp 	return (error);
9598f8def9eSPoul-Henning Kamp }
9608f8def9eSPoul-Henning Kamp 
9618f8def9eSPoul-Henning Kamp static int
962b830359bSPawel Jakub Dawidek mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
9638f8def9eSPoul-Henning Kamp {
9648f8def9eSPoul-Henning Kamp 	struct vattr vattr;
9658f8def9eSPoul-Henning Kamp 	struct nameidata nd;
9663d5c947dSMarcel Moolenaar 	char *fname;
967a08d2e7fSJohn Baldwin 	int error, flags, vfslocked;
9688f8def9eSPoul-Henning Kamp 
9693d5c947dSMarcel Moolenaar 	/*
9703d5c947dSMarcel Moolenaar 	 * Kernel-originated requests must have the filename appended
9713d5c947dSMarcel Moolenaar 	 * to the mdio structure to protect against malicious software.
9723d5c947dSMarcel Moolenaar 	 */
9733d5c947dSMarcel Moolenaar 	fname = mdio->md_file;
9743d5c947dSMarcel Moolenaar 	if ((void *)fname != (void *)(mdio + 1)) {
9753d5c947dSMarcel Moolenaar 		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
97688b5b78dSPawel Jakub Dawidek 		if (error != 0)
97788b5b78dSPawel Jakub Dawidek 			return (error);
9783d5c947dSMarcel Moolenaar 	} else
9793d5c947dSMarcel Moolenaar 		strlcpy(sc->file, fname, sizeof(sc->file));
9803d5c947dSMarcel Moolenaar 
98186776891SChristian S.J. Peron 	/*
9823d5c947dSMarcel Moolenaar 	 * If the user specified that this is a read only device, don't
9833d5c947dSMarcel Moolenaar 	 * set the FWRITE mask before trying to open the backing store.
98486776891SChristian S.J. Peron 	 */
9853d5c947dSMarcel Moolenaar 	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
986a08d2e7fSJohn Baldwin 	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
9879e223287SKonstantin Belousov 	error = vn_open(&nd, &flags, 0, NULL);
988e3ed29a7SPawel Jakub Dawidek 	if (error != 0)
98952c6716fSPawel Jakub Dawidek 		return (error);
990a08d2e7fSJohn Baldwin 	vfslocked = NDHASGIANT(&nd);
991b322d85dSPawel Jakub Dawidek 	NDFREE(&nd, NDF_ONLY_PNBUF);
99233fc3625SJohn Baldwin 	if (nd.ni_vp->v_type != VREG) {
99333fc3625SJohn Baldwin 		error = EINVAL;
99433fc3625SJohn Baldwin 		goto bad;
99533fc3625SJohn Baldwin 	}
99633fc3625SJohn Baldwin 	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
99733fc3625SJohn Baldwin 	if (error != 0)
99833fc3625SJohn Baldwin 		goto bad;
99933fc3625SJohn Baldwin 	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
100033fc3625SJohn Baldwin 		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
100133fc3625SJohn Baldwin 		if (nd.ni_vp->v_iflag & VI_DOOMED) {
100233fc3625SJohn Baldwin 			/* Forced unmount. */
100333fc3625SJohn Baldwin 			error = EBADF;
100433fc3625SJohn Baldwin 			goto bad;
100533fc3625SJohn Baldwin 		}
10068f8def9eSPoul-Henning Kamp 	}
10073b7b5496SKonstantin Belousov 	nd.ni_vp->v_vflag |= VV_MD;
100822db15c0SAttilio Rao 	VOP_UNLOCK(nd.ni_vp, 0);
10099589c256SPoul-Henning Kamp 
1010d5a929dcSPoul-Henning Kamp 	if (mdio->md_fwsectors != 0)
1011d5a929dcSPoul-Henning Kamp 		sc->fwsectors = mdio->md_fwsectors;
1012d5a929dcSPoul-Henning Kamp 	if (mdio->md_fwheads != 0)
1013d5a929dcSPoul-Henning Kamp 		sc->fwheads = mdio->md_fwheads;
10147a6b2b64SPoul-Henning Kamp 	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
10159589c256SPoul-Henning Kamp 	if (!(flags & FWRITE))
10169589c256SPoul-Henning Kamp 		sc->flags |= MD_READONLY;
10178f8def9eSPoul-Henning Kamp 	sc->vnode = nd.ni_vp;
10188f8def9eSPoul-Henning Kamp 
1019a854ed98SJohn Baldwin 	error = mdsetcred(sc, td->td_ucred);
1020b830359bSPawel Jakub Dawidek 	if (error != 0) {
10213cf74e53SPhilip Paeps 		sc->vnode = NULL;
1022cb05b60aSAttilio Rao 		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
10233b7b5496SKonstantin Belousov 		nd.ni_vp->v_vflag &= ~VV_MD;
102433fc3625SJohn Baldwin 		goto bad;
102533fc3625SJohn Baldwin 	}
102633fc3625SJohn Baldwin 	VFS_UNLOCK_GIANT(vfslocked);
102733fc3625SJohn Baldwin 	return (0);
102833fc3625SJohn Baldwin bad:
102922db15c0SAttilio Rao 	VOP_UNLOCK(nd.ni_vp, 0);
103052c6716fSPawel Jakub Dawidek 	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
1031a08d2e7fSJohn Baldwin 	VFS_UNLOCK_GIANT(vfslocked);
10328f8def9eSPoul-Henning Kamp 	return (error);
10338f8def9eSPoul-Henning Kamp }
10348f8def9eSPoul-Henning Kamp 
10358f8def9eSPoul-Henning Kamp static int
1036b40ce416SJulian Elischer mddestroy(struct md_s *sc, struct thread *td)
10378f8def9eSPoul-Henning Kamp {
1038a08d2e7fSJohn Baldwin 	int vfslocked;
10390cddd8f0SMatthew Dillon 
10406f4f00f1SPoul-Henning Kamp 	if (sc->gp) {
10416f4f00f1SPoul-Henning Kamp 		sc->gp->softc = NULL;
10429b00ca19SPoul-Henning Kamp 		g_topology_lock();
10439b00ca19SPoul-Henning Kamp 		g_wither_geom(sc->gp, ENXIO);
10449b00ca19SPoul-Henning Kamp 		g_topology_unlock();
10456b60a2cdSPoul-Henning Kamp 		sc->gp = NULL;
10466b60a2cdSPoul-Henning Kamp 		sc->pp = NULL;
10471f4ee1aaSPoul-Henning Kamp 	}
1048a03be42dSMaxim Sobolev 	if (sc->devstat) {
1049a03be42dSMaxim Sobolev 		devstat_remove_entry(sc->devstat);
1050a03be42dSMaxim Sobolev 		sc->devstat = NULL;
1051a03be42dSMaxim Sobolev 	}
1052a08d2e7fSJohn Baldwin 	mtx_lock(&sc->queue_mtx);
10535c97ca54SIan Dowse 	sc->flags |= MD_SHUTDOWN;
10545c97ca54SIan Dowse 	wakeup(sc);
1055a08d2e7fSJohn Baldwin 	while (!(sc->flags & MD_EXITING))
1056a08d2e7fSJohn Baldwin 		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
1057a08d2e7fSJohn Baldwin 	mtx_unlock(&sc->queue_mtx);
10589fbea3e3SPoul-Henning Kamp 	mtx_destroy(&sc->queue_mtx);
10599b00ca19SPoul-Henning Kamp 	if (sc->vnode != NULL) {
1060a08d2e7fSJohn Baldwin 		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
1061cb05b60aSAttilio Rao 		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
10623b7b5496SKonstantin Belousov 		sc->vnode->v_vflag &= ~VV_MD;
106322db15c0SAttilio Rao 		VOP_UNLOCK(sc->vnode, 0);
10649d4b5945SMaxim Sobolev 		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
1065b40ce416SJulian Elischer 		    FREAD : (FREAD|FWRITE), sc->cred, td);
1066a08d2e7fSJohn Baldwin 		VFS_UNLOCK_GIANT(vfslocked);
10679b00ca19SPoul-Henning Kamp 	}
10688f8def9eSPoul-Henning Kamp 	if (sc->cred != NULL)
10698f8def9eSPoul-Henning Kamp 		crfree(sc->cred);
10701db17c6dSPawel Jakub Dawidek 	if (sc->object != NULL)
1071f820bc50SAlan Cox 		vm_object_deallocate(sc->object);
1072f43b2bacSPoul-Henning Kamp 	if (sc->indir)
1073f43b2bacSPoul-Henning Kamp 		destroy_indir(sc, sc->indir);
1074f43b2bacSPoul-Henning Kamp 	if (sc->uma)
1075f43b2bacSPoul-Henning Kamp 		uma_zdestroy(sc->uma);
10761f4ee1aaSPoul-Henning Kamp 
10771f4ee1aaSPoul-Henning Kamp 	LIST_REMOVE(sc, list);
1078f4e7c5a8SJaakko Heinonen 	free_unr(md_uh, sc->unit);
1079c6517568SPoul-Henning Kamp 	free(sc, M_MD);
10808f8def9eSPoul-Henning Kamp 	return (0);
10818f8def9eSPoul-Henning Kamp }
10828f8def9eSPoul-Henning Kamp 
10838f8def9eSPoul-Henning Kamp static int
1084dc604f0cSEdward Tomasz Napierala mdresize(struct md_s *sc, struct md_ioctl *mdio)
1085dc604f0cSEdward Tomasz Napierala {
1086dc604f0cSEdward Tomasz Napierala 	int error, res;
1087dc604f0cSEdward Tomasz Napierala 	vm_pindex_t oldpages, newpages;
1088dc604f0cSEdward Tomasz Napierala 
1089dc604f0cSEdward Tomasz Napierala 	switch (sc->type) {
1090dc604f0cSEdward Tomasz Napierala 	case MD_VNODE:
1091dc604f0cSEdward Tomasz Napierala 		break;
1092dc604f0cSEdward Tomasz Napierala 	case MD_SWAP:
1093*8cb51643SJaakko Heinonen 		if (mdio->md_mediasize <= 0 ||
1094dc604f0cSEdward Tomasz Napierala 		    (mdio->md_mediasize % PAGE_SIZE) != 0)
1095dc604f0cSEdward Tomasz Napierala 			return (EDOM);
1096dc604f0cSEdward Tomasz Napierala 		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
1097dc604f0cSEdward Tomasz Napierala 		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
1098dc604f0cSEdward Tomasz Napierala 		if (newpages < oldpages) {
1099dc604f0cSEdward Tomasz Napierala 			VM_OBJECT_LOCK(sc->object);
1100dc604f0cSEdward Tomasz Napierala 			vm_object_page_remove(sc->object, newpages, 0, 0);
1101dc604f0cSEdward Tomasz Napierala 			swap_pager_freespace(sc->object, newpages,
1102dc604f0cSEdward Tomasz Napierala 			    oldpages - newpages);
1103dc604f0cSEdward Tomasz Napierala 			swap_release_by_cred(IDX_TO_OFF(oldpages -
1104dc604f0cSEdward Tomasz Napierala 			    newpages), sc->cred);
1105dc604f0cSEdward Tomasz Napierala 			sc->object->charge = IDX_TO_OFF(newpages);
1106dc604f0cSEdward Tomasz Napierala 			sc->object->size = newpages;
1107dc604f0cSEdward Tomasz Napierala 			VM_OBJECT_UNLOCK(sc->object);
1108dc604f0cSEdward Tomasz Napierala 		} else if (newpages > oldpages) {
1109dc604f0cSEdward Tomasz Napierala 			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
1110dc604f0cSEdward Tomasz Napierala 			    oldpages), sc->cred);
1111dc604f0cSEdward Tomasz Napierala 			if (!res)
1112dc604f0cSEdward Tomasz Napierala 				return (ENOMEM);
1113dc604f0cSEdward Tomasz Napierala 			if ((mdio->md_options & MD_RESERVE) ||
1114dc604f0cSEdward Tomasz Napierala 			    (sc->flags & MD_RESERVE)) {
1115dc604f0cSEdward Tomasz Napierala 				error = swap_pager_reserve(sc->object,
1116dc604f0cSEdward Tomasz Napierala 				    oldpages, newpages - oldpages);
1117dc604f0cSEdward Tomasz Napierala 				if (error < 0) {
1118dc604f0cSEdward Tomasz Napierala 					swap_release_by_cred(
1119dc604f0cSEdward Tomasz Napierala 					    IDX_TO_OFF(newpages - oldpages),
1120dc604f0cSEdward Tomasz Napierala 					    sc->cred);
1121dc604f0cSEdward Tomasz Napierala 					return (EDOM);
1122dc604f0cSEdward Tomasz Napierala 				}
1123dc604f0cSEdward Tomasz Napierala 			}
1124dc604f0cSEdward Tomasz Napierala 			VM_OBJECT_LOCK(sc->object);
1125dc604f0cSEdward Tomasz Napierala 			sc->object->charge = IDX_TO_OFF(newpages);
1126dc604f0cSEdward Tomasz Napierala 			sc->object->size = newpages;
1127dc604f0cSEdward Tomasz Napierala 			VM_OBJECT_UNLOCK(sc->object);
1128dc604f0cSEdward Tomasz Napierala 		}
1129dc604f0cSEdward Tomasz Napierala 		break;
1130dc604f0cSEdward Tomasz Napierala 	default:
1131dc604f0cSEdward Tomasz Napierala 		return (EOPNOTSUPP);
1132dc604f0cSEdward Tomasz Napierala 	}
1133dc604f0cSEdward Tomasz Napierala 
1134dc604f0cSEdward Tomasz Napierala 	sc->mediasize = mdio->md_mediasize;
1135dc604f0cSEdward Tomasz Napierala 	g_topology_lock();
1136dc604f0cSEdward Tomasz Napierala 	g_resize_provider(sc->pp, sc->mediasize);
1137dc604f0cSEdward Tomasz Napierala 	g_topology_unlock();
1138dc604f0cSEdward Tomasz Napierala 	return (0);
1139dc604f0cSEdward Tomasz Napierala }
1140dc604f0cSEdward Tomasz Napierala 
1141dc604f0cSEdward Tomasz Napierala static int
1142b830359bSPawel Jakub Dawidek mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
11438f8def9eSPoul-Henning Kamp {
1144fcd57fbeSPawel Jakub Dawidek 	vm_ooffset_t npage;
1145fcd57fbeSPawel Jakub Dawidek 	int error;
11468f8def9eSPoul-Henning Kamp 
11478f8def9eSPoul-Henning Kamp 	/*
11488f8def9eSPoul-Henning Kamp 	 * Range check.  Disallow negative sizes or any size less then the
11498f8def9eSPoul-Henning Kamp 	 * size of a page.  Then round to a page.
11508f8def9eSPoul-Henning Kamp 	 */
1151*8cb51643SJaakko Heinonen 	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
11528f8def9eSPoul-Henning Kamp 		return (EDOM);
11538f8def9eSPoul-Henning Kamp 
11548f8def9eSPoul-Henning Kamp 	/*
11558f8def9eSPoul-Henning Kamp 	 * Allocate an OBJT_SWAP object.
11568f8def9eSPoul-Henning Kamp 	 *
11578f8def9eSPoul-Henning Kamp 	 * Note the truncation.
11588f8def9eSPoul-Henning Kamp 	 */
11598f8def9eSPoul-Henning Kamp 
1160b830359bSPawel Jakub Dawidek 	npage = mdio->md_mediasize / PAGE_SIZE;
11619ed40643SPoul-Henning Kamp 	if (mdio->md_fwsectors != 0)
11629ed40643SPoul-Henning Kamp 		sc->fwsectors = mdio->md_fwsectors;
11639ed40643SPoul-Henning Kamp 	if (mdio->md_fwheads != 0)
11649ed40643SPoul-Henning Kamp 		sc->fwheads = mdio->md_fwheads;
1165fcd57fbeSPawel Jakub Dawidek 	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
11663364c323SKonstantin Belousov 	    VM_PROT_DEFAULT, 0, td->td_ucred);
1167812851b6SBrian Feldman 	if (sc->object == NULL)
1168812851b6SBrian Feldman 		return (ENOMEM);
1169dc604f0cSEdward Tomasz Napierala 	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
11708f8def9eSPoul-Henning Kamp 	if (mdio->md_options & MD_RESERVE) {
1171fcd57fbeSPawel Jakub Dawidek 		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
11723364c323SKonstantin Belousov 			error = EDOM;
11733364c323SKonstantin Belousov 			goto finish;
11748f8def9eSPoul-Henning Kamp 		}
11758f8def9eSPoul-Henning Kamp 	}
1176a854ed98SJohn Baldwin 	error = mdsetcred(sc, td->td_ucred);
11773364c323SKonstantin Belousov  finish:
1178e3ed29a7SPawel Jakub Dawidek 	if (error != 0) {
11792eafd8b1SPawel Jakub Dawidek 		vm_object_deallocate(sc->object);
11802eafd8b1SPawel Jakub Dawidek 		sc->object = NULL;
11818f8def9eSPoul-Henning Kamp 	}
1182b830359bSPawel Jakub Dawidek 	return (error);
1183b3b3d1b7SPoul-Henning Kamp }
11848f8def9eSPoul-Henning Kamp 
11859d4b5945SMaxim Sobolev 
11869d4b5945SMaxim Sobolev static int
11879b00ca19SPoul-Henning Kamp xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
11888f8def9eSPoul-Henning Kamp {
11898f8def9eSPoul-Henning Kamp 	struct md_ioctl *mdio;
11908f8def9eSPoul-Henning Kamp 	struct md_s *sc;
1191b830359bSPawel Jakub Dawidek 	int error, i;
1192*8cb51643SJaakko Heinonen 	unsigned sectsize;
11938f8def9eSPoul-Henning Kamp 
11948f8def9eSPoul-Henning Kamp 	if (md_debug)
11958f8def9eSPoul-Henning Kamp 		printf("mdctlioctl(%s %lx %p %x %p)\n",
1196b40ce416SJulian Elischer 			devtoname(dev), cmd, addr, flags, td);
11978f8def9eSPoul-Henning Kamp 
11989b00ca19SPoul-Henning Kamp 	mdio = (struct md_ioctl *)addr;
11999b00ca19SPoul-Henning Kamp 	if (mdio->md_version != MDIOVERSION)
12009b00ca19SPoul-Henning Kamp 		return (EINVAL);
12019b00ca19SPoul-Henning Kamp 
120253d745bcSDima Dorfman 	/*
120353d745bcSDima Dorfman 	 * We assert the version number in the individual ioctl
120453d745bcSDima Dorfman 	 * handlers instead of out here because (a) it is possible we
120553d745bcSDima Dorfman 	 * may add another ioctl in the future which doesn't read an
120653d745bcSDima Dorfman 	 * mdio, and (b) the correct return value for an unknown ioctl
120753d745bcSDima Dorfman 	 * is ENOIOCTL, not EINVAL.
120853d745bcSDima Dorfman 	 */
12099b00ca19SPoul-Henning Kamp 	error = 0;
12108f8def9eSPoul-Henning Kamp 	switch (cmd) {
12118f8def9eSPoul-Henning Kamp 	case MDIOCATTACH:
12128f8def9eSPoul-Henning Kamp 		switch (mdio->md_type) {
12138f8def9eSPoul-Henning Kamp 		case MD_MALLOC:
12148f8def9eSPoul-Henning Kamp 		case MD_PRELOAD:
12158f8def9eSPoul-Henning Kamp 		case MD_VNODE:
12168f8def9eSPoul-Henning Kamp 		case MD_SWAP:
1217b830359bSPawel Jakub Dawidek 			break;
12188f8def9eSPoul-Henning Kamp 		default:
12198f8def9eSPoul-Henning Kamp 			return (EINVAL);
12208f8def9eSPoul-Henning Kamp 		}
1221*8cb51643SJaakko Heinonen 		if (mdio->md_sectorsize == 0)
1222*8cb51643SJaakko Heinonen 			sectsize = DEV_BSIZE;
1223*8cb51643SJaakko Heinonen 		else
1224*8cb51643SJaakko Heinonen 			sectsize = mdio->md_sectorsize;
1225*8cb51643SJaakko Heinonen 		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
1226*8cb51643SJaakko Heinonen 			return (EINVAL);
12277ee3c044SPawel Jakub Dawidek 		if (mdio->md_options & MD_AUTOUNIT)
1228947fc8deSPoul-Henning Kamp 			sc = mdnew(-1, &error, mdio->md_type);
1229f4e7c5a8SJaakko Heinonen 		else {
1230f4e7c5a8SJaakko Heinonen 			if (mdio->md_unit > INT_MAX)
1231f4e7c5a8SJaakko Heinonen 				return (EINVAL);
1232947fc8deSPoul-Henning Kamp 			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
1233f4e7c5a8SJaakko Heinonen 		}
1234b830359bSPawel Jakub Dawidek 		if (sc == NULL)
12357ee3c044SPawel Jakub Dawidek 			return (error);
12367ee3c044SPawel Jakub Dawidek 		if (mdio->md_options & MD_AUTOUNIT)
12377ee3c044SPawel Jakub Dawidek 			mdio->md_unit = sc->unit;
1238b830359bSPawel Jakub Dawidek 		sc->mediasize = mdio->md_mediasize;
1239*8cb51643SJaakko Heinonen 		sc->sectorsize = sectsize;
1240b830359bSPawel Jakub Dawidek 		error = EDOOFUS;
1241b830359bSPawel Jakub Dawidek 		switch (sc->type) {
1242b830359bSPawel Jakub Dawidek 		case MD_MALLOC:
12439b00ca19SPoul-Henning Kamp 			sc->start = mdstart_malloc;
1244b830359bSPawel Jakub Dawidek 			error = mdcreate_malloc(sc, mdio);
1245b830359bSPawel Jakub Dawidek 			break;
1246b830359bSPawel Jakub Dawidek 		case MD_PRELOAD:
12479b00ca19SPoul-Henning Kamp 			sc->start = mdstart_preload;
1248b830359bSPawel Jakub Dawidek 			error = mdcreate_preload(sc, mdio);
1249b830359bSPawel Jakub Dawidek 			break;
1250b830359bSPawel Jakub Dawidek 		case MD_VNODE:
12519b00ca19SPoul-Henning Kamp 			sc->start = mdstart_vnode;
1252b830359bSPawel Jakub Dawidek 			error = mdcreate_vnode(sc, mdio, td);
1253b830359bSPawel Jakub Dawidek 			break;
1254b830359bSPawel Jakub Dawidek 		case MD_SWAP:
12559b00ca19SPoul-Henning Kamp 			sc->start = mdstart_swap;
1256b830359bSPawel Jakub Dawidek 			error = mdcreate_swap(sc, mdio, td);
1257b830359bSPawel Jakub Dawidek 			break;
1258b830359bSPawel Jakub Dawidek 		}
1259b830359bSPawel Jakub Dawidek 		if (error != 0) {
1260b830359bSPawel Jakub Dawidek 			mddestroy(sc, td);
1261b830359bSPawel Jakub Dawidek 			return (error);
1262b830359bSPawel Jakub Dawidek 		}
12639b00ca19SPoul-Henning Kamp 
12649b00ca19SPoul-Henning Kamp 		/* Prune off any residual fractional sector */
12659b00ca19SPoul-Henning Kamp 		i = sc->mediasize % sc->sectorsize;
12669b00ca19SPoul-Henning Kamp 		sc->mediasize -= i;
12679b00ca19SPoul-Henning Kamp 
1268b830359bSPawel Jakub Dawidek 		mdinit(sc);
1269b830359bSPawel Jakub Dawidek 		return (0);
12708f8def9eSPoul-Henning Kamp 	case MDIOCDETACH:
1271a9ebb311SEdward Tomasz Napierala 		if (mdio->md_mediasize != 0 ||
1272a9ebb311SEdward Tomasz Napierala 		    (mdio->md_options & ~MD_FORCE) != 0)
12738f8def9eSPoul-Henning Kamp 			return (EINVAL);
12749b00ca19SPoul-Henning Kamp 
12759b00ca19SPoul-Henning Kamp 		sc = mdfind(mdio->md_unit);
12769b00ca19SPoul-Henning Kamp 		if (sc == NULL)
12779b00ca19SPoul-Henning Kamp 			return (ENOENT);
1278a9ebb311SEdward Tomasz Napierala 		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
1279a9ebb311SEdward Tomasz Napierala 		    !(mdio->md_options & MD_FORCE))
12809b00ca19SPoul-Henning Kamp 			return (EBUSY);
12819b00ca19SPoul-Henning Kamp 		return (mddestroy(sc, td));
1282dc604f0cSEdward Tomasz Napierala 	case MDIOCRESIZE:
1283dc604f0cSEdward Tomasz Napierala 		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
1284dc604f0cSEdward Tomasz Napierala 			return (EINVAL);
1285dc604f0cSEdward Tomasz Napierala 
1286dc604f0cSEdward Tomasz Napierala 		sc = mdfind(mdio->md_unit);
1287dc604f0cSEdward Tomasz Napierala 		if (sc == NULL)
1288dc604f0cSEdward Tomasz Napierala 			return (ENOENT);
1289*8cb51643SJaakko Heinonen 		if (mdio->md_mediasize < sc->sectorsize)
1290*8cb51643SJaakko Heinonen 			return (EINVAL);
1291dc604f0cSEdward Tomasz Napierala 		if (mdio->md_mediasize < sc->mediasize &&
1292dc604f0cSEdward Tomasz Napierala 		    !(sc->flags & MD_FORCE) &&
1293dc604f0cSEdward Tomasz Napierala 		    !(mdio->md_options & MD_FORCE))
1294dc604f0cSEdward Tomasz Napierala 			return (EBUSY);
1295dc604f0cSEdward Tomasz Napierala 		return (mdresize(sc, mdio));
1296174b5e9aSPoul-Henning Kamp 	case MDIOCQUERY:
1297174b5e9aSPoul-Henning Kamp 		sc = mdfind(mdio->md_unit);
1298174b5e9aSPoul-Henning Kamp 		if (sc == NULL)
1299174b5e9aSPoul-Henning Kamp 			return (ENOENT);
1300174b5e9aSPoul-Henning Kamp 		mdio->md_type = sc->type;
1301174b5e9aSPoul-Henning Kamp 		mdio->md_options = sc->flags;
1302b830359bSPawel Jakub Dawidek 		mdio->md_mediasize = sc->mediasize;
1303b830359bSPawel Jakub Dawidek 		mdio->md_sectorsize = sc->sectorsize;
13049b00ca19SPoul-Henning Kamp 		if (sc->type == MD_VNODE)
130588b5b78dSPawel Jakub Dawidek 			error = copyout(sc->file, mdio->md_file,
130688b5b78dSPawel Jakub Dawidek 			    strlen(sc->file) + 1);
130788b5b78dSPawel Jakub Dawidek 		return (error);
130816bcbe8cSPoul-Henning Kamp 	case MDIOCLIST:
130916bcbe8cSPoul-Henning Kamp 		i = 1;
131016bcbe8cSPoul-Henning Kamp 		LIST_FOREACH(sc, &md_softc_list, list) {
131116bcbe8cSPoul-Henning Kamp 			if (i == MDNPAD - 1)
131216bcbe8cSPoul-Henning Kamp 				mdio->md_pad[i] = -1;
131316bcbe8cSPoul-Henning Kamp 			else
131416bcbe8cSPoul-Henning Kamp 				mdio->md_pad[i++] = sc->unit;
131516bcbe8cSPoul-Henning Kamp 		}
131616bcbe8cSPoul-Henning Kamp 		mdio->md_pad[0] = i - 1;
131716bcbe8cSPoul-Henning Kamp 		return (0);
13188f8def9eSPoul-Henning Kamp 	default:
13198f8def9eSPoul-Henning Kamp 		return (ENOIOCTL);
13208f8def9eSPoul-Henning Kamp 	};
13219b00ca19SPoul-Henning Kamp }
13229b00ca19SPoul-Henning Kamp 
13239b00ca19SPoul-Henning Kamp static int
13249b00ca19SPoul-Henning Kamp mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
13259b00ca19SPoul-Henning Kamp {
13269b00ca19SPoul-Henning Kamp 	int error;
13279b00ca19SPoul-Henning Kamp 
13289b00ca19SPoul-Henning Kamp 	sx_xlock(&md_sx);
13299b00ca19SPoul-Henning Kamp 	error = xmdctlioctl(dev, cmd, addr, flags, td);
13309b00ca19SPoul-Henning Kamp 	sx_xunlock(&md_sx);
13319b00ca19SPoul-Henning Kamp 	return (error);
13323f54a085SPoul-Henning Kamp }
13333f54a085SPoul-Henning Kamp 
133400a6a3c6SPoul-Henning Kamp static void
1335b830359bSPawel Jakub Dawidek md_preloaded(u_char *image, size_t length)
1336637f671aSPoul-Henning Kamp {
1337637f671aSPoul-Henning Kamp 	struct md_s *sc;
13389b00ca19SPoul-Henning Kamp 	int error;
1339637f671aSPoul-Henning Kamp 
1340947fc8deSPoul-Henning Kamp 	sc = mdnew(-1, &error, MD_PRELOAD);
1341637f671aSPoul-Henning Kamp 	if (sc == NULL)
1342637f671aSPoul-Henning Kamp 		return;
1343b830359bSPawel Jakub Dawidek 	sc->mediasize = length;
1344b830359bSPawel Jakub Dawidek 	sc->sectorsize = DEV_BSIZE;
1345637f671aSPoul-Henning Kamp 	sc->pl_ptr = image;
1346637f671aSPoul-Henning Kamp 	sc->pl_len = length;
13479b00ca19SPoul-Henning Kamp 	sc->start = mdstart_preload;
13485d4ca75eSLuigi Rizzo #ifdef MD_ROOT
1349637f671aSPoul-Henning Kamp 	if (sc->unit == 0)
13505d4ca75eSLuigi Rizzo 		rootdevnames[0] = "ufs:/dev/md0";
13515d4ca75eSLuigi Rizzo #endif
1352637f671aSPoul-Henning Kamp 	mdinit(sc);
1353637f671aSPoul-Henning Kamp }
1354637f671aSPoul-Henning Kamp 
1355637f671aSPoul-Henning Kamp static void
135619945697SPoul-Henning Kamp g_md_init(struct g_class *mp __unused)
135700a6a3c6SPoul-Henning Kamp {
135895f1a897SPoul-Henning Kamp 	caddr_t mod;
135995f1a897SPoul-Henning Kamp 	u_char *ptr, *name, *type;
136095f1a897SPoul-Henning Kamp 	unsigned len;
1361d12fc952SKonstantin Belousov 	int i;
1362d12fc952SKonstantin Belousov 
1363d12fc952SKonstantin Belousov 	/* figure out log2(NINDIR) */
1364d12fc952SKonstantin Belousov 	for (i = NINDIR, nshift = -1; i; nshift++)
1365d12fc952SKonstantin Belousov 		i >>= 1;
136695f1a897SPoul-Henning Kamp 
13670a937206SPoul-Henning Kamp 	mod = NULL;
13689b00ca19SPoul-Henning Kamp 	sx_init(&md_sx, "MD config lock");
13690a937206SPoul-Henning Kamp 	g_topology_unlock();
1370f4e7c5a8SJaakko Heinonen 	md_uh = new_unrhdr(0, INT_MAX, NULL);
137171e4fff8SPoul-Henning Kamp #ifdef MD_ROOT_SIZE
13729b00ca19SPoul-Henning Kamp 	sx_xlock(&md_sx);
1373de64f22aSLuigi Rizzo 	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
13749b00ca19SPoul-Henning Kamp 	sx_xunlock(&md_sx);
137571e4fff8SPoul-Henning Kamp #endif
13769b00ca19SPoul-Henning Kamp 	/* XXX: are preload_* static or do they need Giant ? */
137795f1a897SPoul-Henning Kamp 	while ((mod = preload_search_next_name(mod)) != NULL) {
137895f1a897SPoul-Henning Kamp 		name = (char *)preload_search_info(mod, MODINFO_NAME);
137995f1a897SPoul-Henning Kamp 		if (name == NULL)
138095f1a897SPoul-Henning Kamp 			continue;
13819b00ca19SPoul-Henning Kamp 		type = (char *)preload_search_info(mod, MODINFO_TYPE);
138295f1a897SPoul-Henning Kamp 		if (type == NULL)
138395f1a897SPoul-Henning Kamp 			continue;
138471e4fff8SPoul-Henning Kamp 		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
138595f1a897SPoul-Henning Kamp 			continue;
13868d5ac6c3SMarcel Moolenaar 		ptr = preload_fetch_addr(mod);
13878d5ac6c3SMarcel Moolenaar 		len = preload_fetch_size(mod);
13888d5ac6c3SMarcel Moolenaar 		if (ptr != NULL && len != 0) {
1389fe603109SMaxim Sobolev 			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
1390fe603109SMaxim Sobolev 			    MD_NAME, mdunits, name, len, ptr);
13919b00ca19SPoul-Henning Kamp 			sx_xlock(&md_sx);
1392637f671aSPoul-Henning Kamp 			md_preloaded(ptr, len);
13939b00ca19SPoul-Henning Kamp 			sx_xunlock(&md_sx);
139495f1a897SPoul-Henning Kamp 		}
13958d5ac6c3SMarcel Moolenaar 	}
139606d425f9SEd Schouten 	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
139710b0e058SDima Dorfman 	    0600, MDCTL_NAME);
13980eb14309SPoul-Henning Kamp 	g_topology_lock();
139900a6a3c6SPoul-Henning Kamp }
140000a6a3c6SPoul-Henning Kamp 
140119945697SPoul-Henning Kamp static void
1402c27a8954SWojciech A. Koszek g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1403c27a8954SWojciech A. Koszek     struct g_consumer *cp __unused, struct g_provider *pp)
1404c27a8954SWojciech A. Koszek {
1405c27a8954SWojciech A. Koszek 	struct md_s *mp;
1406c27a8954SWojciech A. Koszek 	char *type;
1407c27a8954SWojciech A. Koszek 
1408c27a8954SWojciech A. Koszek 	mp = gp->softc;
1409c27a8954SWojciech A. Koszek 	if (mp == NULL)
1410c27a8954SWojciech A. Koszek 		return;
1411c27a8954SWojciech A. Koszek 
1412c27a8954SWojciech A. Koszek 	switch (mp->type) {
1413c27a8954SWojciech A. Koszek 	case MD_MALLOC:
1414c27a8954SWojciech A. Koszek 		type = "malloc";
1415c27a8954SWojciech A. Koszek 		break;
1416c27a8954SWojciech A. Koszek 	case MD_PRELOAD:
1417c27a8954SWojciech A. Koszek 		type = "preload";
1418c27a8954SWojciech A. Koszek 		break;
1419c27a8954SWojciech A. Koszek 	case MD_VNODE:
1420c27a8954SWojciech A. Koszek 		type = "vnode";
1421c27a8954SWojciech A. Koszek 		break;
1422c27a8954SWojciech A. Koszek 	case MD_SWAP:
1423c27a8954SWojciech A. Koszek 		type = "swap";
1424c27a8954SWojciech A. Koszek 		break;
1425c27a8954SWojciech A. Koszek 	default:
1426c27a8954SWojciech A. Koszek 		type = "unknown";
1427c27a8954SWojciech A. Koszek 		break;
1428c27a8954SWojciech A. Koszek 	}
1429c27a8954SWojciech A. Koszek 
1430c27a8954SWojciech A. Koszek 	if (pp != NULL) {
1431c27a8954SWojciech A. Koszek 		if (indent == NULL) {
1432c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " u %d", mp->unit);
1433c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
1434c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
1435c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
1436c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
1437c27a8954SWojciech A. Koszek 			sbuf_printf(sb, " t %s", type);
1438c27a8954SWojciech A. Koszek 			if (mp->type == MD_VNODE && mp->vnode != NULL)
1439c27a8954SWojciech A. Koszek 				sbuf_printf(sb, " file %s", mp->file);
1440c27a8954SWojciech A. Koszek 		} else {
1441c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
1442c27a8954SWojciech A. Koszek 			    mp->unit);
1443c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
1444c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->sectorsize);
1445c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
1446c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->fwheads);
1447c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
1448c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->fwsectors);
1449c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<length>%ju</length>\n",
1450c27a8954SWojciech A. Koszek 			    indent, (uintmax_t) mp->mediasize);
14511f192809SAndrey V. Elsukov 			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
14521f192809SAndrey V. Elsukov 			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
14531f192809SAndrey V. Elsukov 			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
14541f192809SAndrey V. Elsukov 			    (mp->flags & MD_READONLY) == 0 ? "read-write":
14551f192809SAndrey V. Elsukov 			    "read-only");
1456c27a8954SWojciech A. Koszek 			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1457c27a8954SWojciech A. Koszek 			    type);
1458c27a8954SWojciech A. Koszek 			if (mp->type == MD_VNODE && mp->vnode != NULL)
1459c27a8954SWojciech A. Koszek 				sbuf_printf(sb, "%s<file>%s</file>\n",
1460c27a8954SWojciech A. Koszek 				    indent, mp->file);
1461c27a8954SWojciech A. Koszek 		}
1462c27a8954SWojciech A. Koszek 	}
1463c27a8954SWojciech A. Koszek }
1464c27a8954SWojciech A. Koszek 
1465c27a8954SWojciech A. Koszek static void
146619945697SPoul-Henning Kamp g_md_fini(struct g_class *mp __unused)
146757e9624eSPoul-Henning Kamp {
14689d4b5945SMaxim Sobolev 
14699b00ca19SPoul-Henning Kamp 	sx_destroy(&md_sx);
147019945697SPoul-Henning Kamp 	if (status_dev != NULL)
147157e9624eSPoul-Henning Kamp 		destroy_dev(status_dev);
1472f4e7c5a8SJaakko Heinonen 	delete_unrhdr(md_uh);
147357e9624eSPoul-Henning Kamp }
1474