/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

/*
 * Subroutines for use by filesystems.
 *
 * XXX: should maybe live somewhere else?
 */
#include <sys/buf.h>

struct g_vfs_softc {
	struct mtx	sc_mtx;
	struct bufobj	*sc_bo;
	int		sc_active;
	int		sc_orphaned;
};

static struct buf_ops __g_vfs_bufops = {
	.bop_name =	"GEOM_VFS",
	.bop_write =	bufwrite,
	.bop_strategy =	g_vfs_strategy,
	.bop_sync =	bufsync,
	.bop_bdflush =	bufbdflush
};

struct buf_ops *g_vfs_bufops = &__g_vfs_bufops;

static g_orphan_t g_vfs_orphan;

static struct g_class g_vfs_class = {
	.name =		"VFS",
	.version =	G_VERSION,
	.orphan =	g_vfs_orphan,
};

DECLARE_GEOM_CLASS(g_vfs_class, g_vfs);

static void
g_vfs_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	if (cp->geom->softc == NULL)
		g_wither_geom(cp->geom, ENXIO);
}

static void
g_vfs_done(struct bio *bip)
{
	struct g_consumer *cp;
	struct g_vfs_softc *sc;
	struct buf *bp;
	int destroy;
	struct mount *mp;
	struct vnode *vp;
	struct cdev *cdevp;

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	bp = bip->bio_caller2;
	vp = bp->b_vp;
	if (vp != NULL) {
		/*
		 * If this is not a disk vnode, use its associated mount point;
		 * otherwise use the mount point associated with the disk.
		 */
		VI_LOCK(vp);
		if (vp->v_type != VCHR ||
		    (cdevp = vp->v_rdev) == NULL ||
		    cdevp->si_devsw == NULL ||
		    (cdevp->si_devsw->d_flags & D_DISK) == 0)
			mp = vp->v_mount;
		else
			mp = cdevp->si_mountpt;
		if (mp != NULL) {
			if (bp->b_iocmd == BIO_READ) {
				if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
					mp->mnt_stat.f_asyncreads++;
				else
					mp->mnt_stat.f_syncreads++;
			} else if (bp->b_iocmd == BIO_WRITE) {
				if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
					mp->mnt_stat.f_asyncwrites++;
				else
					mp->mnt_stat.f_syncwrites++;
			}
		}
		VI_UNLOCK(vp);
	}

	cp = bip->bio_from;
	sc = cp->geom->softc;
	if (bip->bio_error) {
		printf("g_vfs_done():");
		g_print_bio(bip);
		printf("error = %d\n", bip->bio_error);
	}
	bp->b_error = bip->bio_error;
	bp->b_ioflags = bip->bio_flags;
	if (bip->bio_error)
		bp->b_ioflags |= BIO_ERROR;
	bp->b_resid = bp->b_bcount - bip->bio_completed;
	g_destroy_bio(bip);

	mtx_lock(&sc->sc_mtx);
	destroy = ((--sc->sc_active) == 0 && sc->sc_orphaned);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_vfs_destroy, cp, M_WAITOK, NULL);

	bufdone(bp);
}

void
g_vfs_strategy(struct bufobj *bo, struct buf *bp)
{
	struct g_vfs_softc *sc;
	struct g_consumer *cp;
	struct bio *bip;

	cp = bo->bo_private;
	sc = cp->geom->softc;

	/*
	 * If the provider has orphaned us, just return ENXIO.
	 */
	mtx_lock(&sc->sc_mtx);
	if (sc->sc_orphaned) {
		mtx_unlock(&sc->sc_mtx);
		bp->b_error = ENXIO;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);

	bip = g_alloc_bio();
	bip->bio_cmd = bp->b_iocmd;
	bip->bio_offset = bp->b_iooffset;
	bip->bio_length = bp->b_bcount;
	bdata2bio(bp, bip);
	if ((bp->b_flags & B_BARRIER) != 0) {
		bip->bio_flags |= BIO_ORDERED;
		bp->b_flags &= ~B_BARRIER;
	}
	bip->bio_done = g_vfs_done;
	bip->bio_caller2 = bp;
	g_io_request(bip, cp);
}

static void
g_vfs_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_vfs_softc *sc;
	int destroy;

	g_topology_assert();

	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "g_vfs_orphan(%p(%s))", cp, gp->name);
	sc = gp->softc;
	if (sc == NULL)
		return;
	mtx_lock(&sc->sc_mtx);
	sc->sc_orphaned = 1;
	destroy = (sc->sc_active == 0);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_vfs_destroy(cp, 0);

	/*
	 * Do not destroy the geom.  The filesystem will do that during unmount.
	 */
}

int
g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_vfs_softc *sc;
	struct bufobj *bo;
	int error;

	g_topology_assert();

	*cpp = NULL;
	bo = &vp->v_bufobj;
	if (bo->bo_private != vp)
		return (EBUSY);

	pp = g_dev_getprovider(vp->v_rdev);
	if (pp == NULL)
		return (ENOENT);
	gp = g_new_geomf(&g_vfs_class, "%s.%s", fsname, pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_vfs", NULL, MTX_DEF);
	sc->sc_bo = bo;
	gp->softc = sc;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_access(cp, 1, wr, wr);
	if (error) {
		g_wither_geom(gp, ENXIO);
		return (error);
	}
	vnode_create_vobject(vp, pp->mediasize, curthread);
	*cpp = cp;
	cp->private = vp;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	bo->bo_ops = g_vfs_bufops;
	bo->bo_private = cp;
	bo->bo_bsize = pp->sectorsize;

	return (error);
}

void
g_vfs_close(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_vfs_softc *sc;

	g_topology_assert();

	gp = cp->geom;
	sc = gp->softc;
	bufobj_invalbuf(sc->sc_bo, V_SAVE, 0, 0);
	sc->sc_bo->bo_private = cp->private;
	gp->softc = NULL;
	mtx_destroy(&sc->sc_mtx);
	if (!sc->sc_orphaned || cp->provider == NULL)
		g_wither_geom_close(gp, ENXIO);
	g_free(sc);
}
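
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * mount path typically opens the backing provider through g_vfs_open() with
 * the GEOM topology lock held, after which buffer I/O on the device vnode's
 * bufobj is routed through g_vfs_strategy() via g_vfs_bufops, and the
 * unmount path releases the consumer with g_vfs_close().  The function name
 * "xxx_mount_dev" and the filesystem name "xxx" below are hypothetical
 * placeholders; the g_vfs_* and g_topology_* calls are the ones defined or
 * used in this file.
 *
 *	static int
 *	xxx_mount_dev(struct vnode *devvp, int rdonly)
 *	{
 *		struct g_consumer *cp;
 *		int error;
 *
 *		g_topology_lock();
 *		error = g_vfs_open(devvp, &cp, "xxx", rdonly ? 0 : 1);
 *		g_topology_unlock();
 *		if (error != 0)
 *			return (error);
 *
 *		... I/O on devvp->v_bufobj now reaches g_vfs_strategy() ...
 *
 *		g_topology_lock();
 *		g_vfs_close(cp);
 *		g_topology_unlock();
 *		return (0);
 *	}
 */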