Lines Matching +full:wr +full:- +full:active
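
The matched lines below are fragments of what appears to be FreeBSD's GEOM/VFS glue, sys/geom/geom_vfs.c: the layer that lets a filesystem push buffer (buf) I/O through a GEOM consumer. Only the matching lines are shown, so every block is an excerpt; lines elided inside a block are marked with "...", and each block is labelled with the function it belongs to.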

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Poul-Henning Kamp
 * ...
 */
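
g_vfs_destroy() is the teardown helper: it drops whatever read/write/exclusive access counts the consumer still holds and, if the geom's softc has already been cleared (g_vfs_close() does that), withers the geom with ENXIO.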

g_vfs_destroy():

	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	...
	if (cp->geom->softc == NULL)
		g_wither_geom(cp->geom, ENXIO);
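
g_vfs_done() is the completion callback attached to every bio issued below. This first fragment recovers the originating buf from bio_caller2 and picks the mount point to charge for I/O statistics: a vnode that is not a disk character device is charged to its own mount, otherwise the mount point recorded on the cdev is used. The elided inner tests decide whether each transfer is counted as synchronous or asynchronous.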

g_vfs_done():

	bp = bip->bio_caller2;
	vp = bp->b_vp;
	...
	if (vp->v_type != VCHR ||
	    (cdevp = vp->v_rdev) == NULL ||
	    cdevp->si_devsw == NULL ||
	    (cdevp->si_devsw->d_flags & D_DISK) == 0)
		mp = vp->v_mount;
	else
		mp = cdevp->si_mountpt;
	...
	if (bp->b_iocmd == BIO_READ) {
		if (...)		/* elided test: async vs. sync */
			mp->mnt_stat.f_asyncreads++;
		else
			mp->mnt_stat.f_syncreads++;
	} else if (bp->b_iocmd == BIO_WRITE) {
		if (...)		/* elided test: async vs. sync */
			mp->mnt_stat.f_asyncwrites++;
		else
			mp->mnt_stat.f_syncwrites++;
	}
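
Next comes the error handling. A real error (anything other than EOPNOTSUPP) on a buf marked BX_CVTENXIO latches sc_enxio_active via an atomic compare-and-set, and once that flag is set every later error is rewritten to ENXIO; sc_enxio_reported then ensures that repeated ENXIO failures are reported only once. The two console-reporting calls matched only partially, which is why their argument lists appear truncated below.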

g_vfs_done(), continued:

	cp = bip->bio_from;
	sc = cp->geom->softc;
	if (bip->bio_error != 0 && bip->bio_error != EOPNOTSUPP) {
		if ((bp->b_xflags & BX_CVTENXIO) != 0) {
			if (atomic_cmpset_int(&sc->sc_enxio_active, 0, 1))
				...	/* elided console message, ending with: */
				    bip->bio_to->name);
		}
		if (sc->sc_enxio_active)
			bip->bio_error = ENXIO;
		if (bip->bio_error != ENXIO ||
		    atomic_cmpset_int(&sc->sc_enxio_reported, 0, 1)) {
			...		/* elided error report, its arguments include: */
			    bip->bio_error,
			    bip->bio_error != ENXIO ? "" :
			    ...
		}
	}
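
The completion status is then copied back from the bio to the buf: the flags, BIO_ERROR if an error occurred, the extended error when BIO_EXTERR is set, the plain error number, and the residual count derived from bio_completed.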

g_vfs_done(), continued:

	bp->b_ioflags = bip->bio_flags;
	if (bip->bio_error)
		bp->b_ioflags |= BIO_ERROR;
	if ((bp->b_ioflags & BIO_EXTERR) != 0)
		bp->b_exterr = bip->bio_exterr;
	...
	bp->b_error = bip->bio_error;
	bp->b_resid = bp->b_bcount - bip->bio_completed;
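
Finally the in-flight count is dropped under the softc mutex. If this was the last outstanding bio and the consumer has been orphaned in the meantime, the event parked in sc_event by g_vfs_orphan() (shown further below) is taken out; the elided tail presumably uses it to schedule g_vfs_destroy().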

g_vfs_done(), continued:

	mtx_lock(&sc->sc_mtx);
	destroy = ((--sc->sc_active) == 0 && sc->sc_orphaned);
	...
		event = sc->sc_event;
		sc->sc_event = NULL;
	...
	mtx_unlock(&sc->sc_mtx);
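
g_vfs_strategy() is the bufobj strategy method that g_vfs_open() installs. Before issuing anything it checks, under the softc mutex, whether the consumer has been orphaned or the ENXIO conversion has kicked in; if so the buf fails immediately with ENXIO, and the elided tail of that branch completes the buf and returns. Otherwise the in-flight count is bumped while the mutex is held, pairing with the decrement in g_vfs_done() above.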

g_vfs_strategy():

	cp = bo->bo_private;
	sc = cp->geom->softc;
	...
	mtx_lock(&sc->sc_mtx);
	if (sc->sc_orphaned || sc->sc_enxio_active) {
		mtx_unlock(&sc->sc_mtx);
		bp->b_error = ENXIO;
		bp->b_ioflags |= BIO_ERROR;
		EXTERROR_KE(&bp->b_exterr, ENXIO,
		    "orphaned or enxio active");
		...
	}
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);
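
The rest of g_vfs_strategy() builds a bio mirroring the buf: command, offset and length are copied, a B_BARRIER buf becomes an ordered bio (BIO_ORDERED) with the barrier flag cleared, BIO_SPEEDUP requests carry their ioflags across, and the completion callback plus a back-pointer to the buf are set. The final hand-off to the consumer, presumably a g_io_request() call, is among the elided lines.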

g_vfs_strategy(), continued:

	bip->bio_cmd = bp->b_iocmd;
	bip->bio_offset = bp->b_iooffset;
	bip->bio_length = bp->b_bcount;
	...
	if ((bp->b_flags & B_BARRIER) != 0) {
		bip->bio_flags |= BIO_ORDERED;
		bp->b_flags &= ~B_BARRIER;
	}
	if (bp->b_iocmd == BIO_SPEEDUP)
		bip->bio_flags |= bp->b_ioflags;
	bip->bio_done = g_vfs_done;
	bip->bio_caller2 = bp;
	...
	bip->bio_track_bp = bp;
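
g_vfs_orphan() is the class orphan method, run when the underlying provider goes away. It marks the softc orphaned and checks whether any I/O is still in flight: if so, a pre-allocated event is parked in sc_event so the last completion can trigger the deferred teardown; if not, the elided tail evidently destroys the consumer right away.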

g_vfs_orphan():

	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "g_vfs_orphan(%p(%s))", cp, gp->name);
	sc = gp->softc;
	...
	mtx_lock(&sc->sc_mtx);
	KASSERT(sc->sc_event == NULL, ("g_vfs %p already has an event", sc));
	sc->sc_orphaned = true;
	destroy = (sc->sc_active == 0);
	...
		sc->sc_event = event;
	...
	mtx_unlock(&sc->sc_mtx);
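
Taken together, g_vfs_strategy(), g_vfs_done() and g_vfs_orphan() implement a reference count with deferred teardown around sc_active and sc_orphaned. The following is a minimal userspace sketch of that pattern only, with invented names and plain pthreads; it is not kernel code and not part of geom_vfs.c:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct softc {
		pthread_mutex_t	mtx;
		int		active;		/* in-flight requests, like sc_active */
		bool		orphaned;	/* backing device gone, like sc_orphaned */
	};

	static void
	teardown(struct softc *sc)
	{
		printf("tearing down %p\n", (void *)sc);
	}

	static void
	io_start(struct softc *sc)	/* cf. g_vfs_strategy() */
	{
		pthread_mutex_lock(&sc->mtx);
		sc->active++;
		pthread_mutex_unlock(&sc->mtx);
	}

	static void
	io_done(struct softc *sc)	/* cf. g_vfs_done() */
	{
		bool last;

		pthread_mutex_lock(&sc->mtx);
		last = (--sc->active == 0 && sc->orphaned);
		pthread_mutex_unlock(&sc->mtx);
		if (last)
			teardown(sc);
	}

	static void
	orphan(struct softc *sc)	/* cf. g_vfs_orphan() */
	{
		bool idle;

		pthread_mutex_lock(&sc->mtx);
		sc->orphaned = true;
		idle = (sc->active == 0);
		pthread_mutex_unlock(&sc->mtx);
		if (idle)
			teardown(sc);
	}

	int
	main(void)
	{
		struct softc sc = { PTHREAD_MUTEX_INITIALIZER, 0, false };

		io_start(&sc);
		orphan(&sc);	/* device disappears while I/O is in flight */
		io_done(&sc);	/* the last completion performs the teardown */
		return (0);
	}

Whichever side sees the count at zero after the orphan does the teardown, and because both the decrement and the flag are examined under the same mutex, the decision is made exactly once.

g_vfs_open() is the entry point a filesystem uses to attach to its disk device: it creates a "<fsname>.<provider>" geom and a consumer on the device vnode's provider, requests read access plus write and exclusive access according to wr, rejects providers that report a zero mediasize, creates the disk VM object, and finally points the vnode's bufobj at g_vfs_bufops and at the consumer so that subsequent buf I/O flows through g_vfs_strategy(). A filesystem calls it with the GEOM topology lock held; FFS, for example, does roughly g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1).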

g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr)
{
	...
	bo = &vp->v_bufobj;
	if (bo->bo_private != vp)
		...
	pp = g_dev_getprovider(vp->v_rdev);
	...
	gp = g_new_geomf(&g_vfs_class, "%s.%s", fsname, pp->name);
	...
	mtx_init(&sc->sc_mtx, "g_vfs", NULL, MTX_DEF);
	sc->sc_bo = bo;
	gp->softc = sc;
	...
	error = g_access(cp, 1, wr, wr);
	...
	if (pp->mediasize == 0) {
		(void)g_access(cp, -1, -wr, -wr);
		...
	}
	vnode_create_disk_vobject(vp, pp->mediasize, curthread);
	...
	cp->private = vp;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	bo->bo_ops = g_vfs_bufops;
	bo->bo_private = cp;
	bo->bo_bsize = pp->sectorsize;
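
g_vfs_close() undoes the open at unmount time: it flushes and invalidates the buffers hanging off the bufobj, restores bo_private to the vnode that g_vfs_open() had saved in cp->private, then detaches the softc from the geom and destroys its mutex. The final if decides whether this path, rather than the orphan/deferred-destroy path, still has to tear the geom down; the call that does so is on the elided line. The KASSERT verifies that no deferred-destroy event was left behind.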

g_vfs_close():

	gp = cp->geom;
	sc = gp->softc;
	bufobj_invalbuf(sc->sc_bo, V_SAVE, 0, 0);
	sc->sc_bo->bo_private = cp->private;
	gp->softc = NULL;
	mtx_destroy(&sc->sc_mtx);
	if (!sc->sc_orphaned || cp->provider == NULL)
		...
	KASSERT(sc->sc_event == NULL, ("g_vfs %p event is non-NULL", sc));