/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>
/*
 * Subroutines for use by filesystems.
 *
 * XXX: should maybe live somewhere else?
 */
#include <sys/buf.h>

struct g_vfs_softc {
	struct mtx	 sc_mtx;
	struct bufobj	*sc_bo;
	struct g_event	*sc_event;
	int		 sc_active;
	bool		 sc_orphaned;
	int		 sc_enxio_active;
	int		 sc_enxio_reported;
};

static struct buf_ops __g_vfs_bufops = {
	.bop_name =	"GEOM_VFS",
	.bop_write =	bufwrite,
	.bop_strategy =	g_vfs_strategy,
	.bop_sync =	bufsync,
	.bop_bdflush =	bufbdflush
};

struct buf_ops *g_vfs_bufops = &__g_vfs_bufops;

static g_orphan_t g_vfs_orphan;

static struct g_class g_vfs_class = {
	.name =		"VFS",
	.version =	G_VERSION,
	.orphan =	g_vfs_orphan,
};

DECLARE_GEOM_CLASS(g_vfs_class, g_vfs);

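/*
 * Release any access counts still held on the consumer, detach it from
 * its provider and, once g_vfs_close() has cleared the softc, wither
 * the geom.  Runs as a GEOM event, so the topology lock is held.
 */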
static void
g_vfs_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	if (cp->geom->softc == NULL)
		g_wither_geom(cp->geom, ENXIO);
}

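/*
 * Completion routine for bios issued by g_vfs_strategy().  Copies the
 * result into the originating buf, optionally converts errors to ENXIO,
 * and, if the consumer has been orphaned and this was its last
 * outstanding bio, schedules the consumer's destruction.
 */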
static void
g_vfs_done(struct bio *bip)
{
	struct g_consumer *cp;
	struct g_event *event;
	struct g_vfs_softc *sc;
	struct buf *bp;
	int destroy;
	struct mount *mp;
	struct vnode *vp;
	struct cdev *cdevp;

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	bp = bip->bio_caller2;
	vp = bp->b_vp;
	if (vp != NULL) {
		/*
		 * If not a disk vnode, use its associated mount point;
		 * otherwise, use the mount point associated with the disk.
		 */
		VI_LOCK(vp);
		if (vp->v_type != VCHR ||
		    (cdevp = vp->v_rdev) == NULL ||
		    cdevp->si_devsw == NULL ||
		    (cdevp->si_devsw->d_flags & D_DISK) == 0)
			mp = vp->v_mount;
		else
			mp = cdevp->si_mountpt;
		if (mp != NULL) {
			if (bp->b_iocmd == BIO_READ) {
				if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
					mp->mnt_stat.f_asyncreads++;
				else
					mp->mnt_stat.f_syncreads++;
			} else if (bp->b_iocmd == BIO_WRITE) {
				if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
					mp->mnt_stat.f_asyncwrites++;
				else
					mp->mnt_stat.f_syncwrites++;
			}
		}
		VI_UNLOCK(vp);
	}

	cp = bip->bio_from;
	sc = cp->geom->softc;
	if (bip->bio_error != 0 && bip->bio_error != EOPNOTSUPP) {
		if ((bp->b_xflags & BX_CVTENXIO) != 0) {
			if (atomic_cmpset_int(&sc->sc_enxio_active, 0, 1))
				printf("g_vfs_done(): %s converting all errors to ENXIO\n",
				    bip->bio_to->name);
		}
		if (sc->sc_enxio_active)
			bip->bio_error = ENXIO;
		if (bip->bio_error != ENXIO ||
		    atomic_cmpset_int(&sc->sc_enxio_reported, 0, 1)) {
			g_print_bio("g_vfs_done():", bip, "error = %d%s",
			    bip->bio_error,
			    bip->bio_error != ENXIO ? "" :
			    " suppressing further ENXIO");
		}
	}
	bp->b_error = bip->bio_error;
	bp->b_ioflags = bip->bio_flags;
	if (bip->bio_error)
		bp->b_ioflags |= BIO_ERROR;
	bp->b_resid = bp->b_bcount - bip->bio_completed;
	g_destroy_bio(bip);

	mtx_lock(&sc->sc_mtx);
	destroy = ((--sc->sc_active) == 0 && sc->sc_orphaned);
	if (destroy) {
		event = sc->sc_event;
		sc->sc_event = NULL;
	} else
		event = NULL;
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event_ep(g_vfs_destroy, cp, event, NULL);

	bufdone(bp);
}

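/*
 * Translate a struct buf into a struct bio and hand it to the GEOM
 * consumer attached to the bufobj.  Completes the buf immediately with
 * ENXIO if the underlying provider has gone away.
 */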
void
g_vfs_strategy(struct bufobj *bo, struct buf *bp)
{
	struct g_vfs_softc *sc;
	struct g_consumer *cp;
	struct bio *bip;

	cp = bo->bo_private;
	sc = cp->geom->softc;

	/*
	 * If the provider has orphaned us, just return ENXIO.
	 */
	mtx_lock(&sc->sc_mtx);
	if (sc->sc_orphaned || sc->sc_enxio_active) {
		mtx_unlock(&sc->sc_mtx);
		bp->b_error = ENXIO;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);

	bip = g_alloc_bio();
	bip->bio_cmd = bp->b_iocmd;
	bip->bio_offset = bp->b_iooffset;
	bip->bio_length = bp->b_bcount;
	bdata2bio(bp, bip);
	if ((bp->b_flags & B_BARRIER) != 0) {
		bip->bio_flags |= BIO_ORDERED;
		bp->b_flags &= ~B_BARRIER;
	}
	if (bp->b_iocmd == BIO_SPEEDUP)
		bip->bio_flags |= bp->b_ioflags;
	bip->bio_done = g_vfs_done;
	bip->bio_caller2 = bp;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	buf_track(bp, __func__);
	bip->bio_track_bp = bp;
#endif
	g_io_request(bip, cp);
}

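/*
 * The provider has gone away.  Mark the softc as orphaned; if no bios
 * are in flight, tear the consumer down immediately, otherwise stash a
 * pre-allocated event for the last completing bio to post.
 */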
static void
g_vfs_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_event *event;
	struct g_vfs_softc *sc;
	int destroy;

	g_topology_assert();

	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "g_vfs_orphan(%p(%s))", cp, gp->name);
	sc = gp->softc;
	if (sc == NULL)
		return;
	event = g_alloc_event(M_WAITOK);
	mtx_lock(&sc->sc_mtx);
	KASSERT(sc->sc_event == NULL, ("g_vfs %p already has an event", sc));
	sc->sc_orphaned = true;
	destroy = (sc->sc_active == 0);
	if (!destroy) {
		sc->sc_event = event;
		event = NULL;
	}
	mtx_unlock(&sc->sc_mtx);
	if (destroy) {
		g_free(event);
		g_vfs_destroy(cp, 0);
	}

	/*
	 * Do not destroy the geom here; the filesystem will do that
	 * during unmount.
	 */
}

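/*
 * Open the GEOM provider backing the given disk device vnode for use
 * by a filesystem.  Creates a VFS geom and consumer, acquires read
 * access (plus write and exclusive access when "wr" is nonzero), and
 * points the vnode's bufobj at g_vfs_bufops.  Called with the topology
 * lock held.
 */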
int
g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_vfs_softc *sc;
	struct bufobj *bo;
	int error;

	g_topology_assert();

	*cpp = NULL;
	bo = &vp->v_bufobj;
	if (bo->bo_private != vp)
		return (EBUSY);

	pp = g_dev_getprovider(vp->v_rdev);
	if (pp == NULL)
		return (ENOENT);
	gp = g_new_geomf(&g_vfs_class, "%s.%s", fsname, pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_vfs", NULL, MTX_DEF);
	sc->sc_bo = bo;
	gp->softc = sc;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error) {
		g_wither_geom(gp, ENXIO);
		return (error);
	}
	error = g_access(cp, 1, wr, wr);
	if (error) {
		g_wither_geom(gp, ENXIO);
		return (error);
	}
	vnode_create_vobject(vp, pp->mediasize, curthread);
	*cpp = cp;
	cp->private = vp;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	bo->bo_ops = g_vfs_bufops;
	bo->bo_private = cp;
	bo->bo_bsize = pp->sectorsize;

	return (error);
}

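/*
 * A sketch of typical usage from a filesystem's mount and unmount
 * paths (names such as "devvp" and "ronly" are illustrative, not part
 * of this API; compare ffs_mount() for a real example):
 *
 *	struct g_consumer *cp;
 *	int error;
 *
 *	g_topology_lock();
 *	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
 *	g_topology_unlock();
 *	if (error != 0)
 *		return (error);
 *	...
 *	g_topology_lock();
 *	g_vfs_close(cp);
 *	g_topology_unlock();
 */

/*
 * Undo g_vfs_open(): flush and invalidate the bufobj, restore its
 * private pointer, and release the consumer and geom.  Called with the
 * topology lock held.
 */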
void
g_vfs_close(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_vfs_softc *sc;

	g_topology_assert();

	gp = cp->geom;
	sc = gp->softc;
	bufobj_invalbuf(sc->sc_bo, V_SAVE, 0, 0);
	sc->sc_bo->bo_private = cp->private;
	gp->softc = NULL;
	mtx_destroy(&sc->sc_mtx);
	if (!sc->sc_orphaned || cp->provider == NULL)
		g_wither_geom_close(gp, ENXIO);
	KASSERT(sc->sc_event == NULL, ("g_vfs %p event is non-NULL", sc));
	g_free(sc);
}
326