xref: /freebsd/sys/geom/geom_dev.c (revision 1f4bcc459a76b7aa664f3fd557684cd0ba6da352)
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

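/*
 * Per-cdev state.  sc_mtx protects sc_open (the sum of the access counts
 * granted through g_dev_open()) and sc_active (the number of bios currently
 * in flight); sc_alias is the physical-path alias cdev, if any.
 */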
struct g_dev_softc {
	struct mtx	 sc_mtx;
	struct cdev	*sc_dev;
	struct cdev	*sc_alias;
	int		 sc_open;
	int		 sc_active;
};

static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;

static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;

static struct g_class g_dev_class	= {
	.name = "DEV",
	.version = G_VERSION,
	.init = g_dev_init,
	.fini = g_dev_fini,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
	.attrchanged = g_dev_attrchanged
};

/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSDs with a marginal
 * increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");

static char *dumpdev = NULL;
static void
g_dev_init(struct g_class *mp)
{

	dumpdev = kern_getenv("dumpdev");
}

static void
g_dev_fini(struct g_class *mp)
{

	freeenv(dumpdev);
	dumpdev = NULL;
}

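/*
 * g_dev_setdumpdev()
 *
 * Set or clear the kernel dump device.  A NULL dev clears the current
 * dumper; otherwise the GEOM::kerneldump attribute is queried to fill in
 * the dump parameters before registering the dumper.
 */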
static int
g_dev_setdumpdev(struct cdev *dev, struct thread *td)
{
	struct g_kerneldump kd;
	struct g_consumer *cp;
	int error, len;

	if (dev == NULL)
		return (set_dumper(NULL, NULL, td));

	cp = dev->si_drv2;
	len = sizeof(kd);
	kd.offset = 0;
	kd.length = OFF_MAX;
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
	if (error == 0) {
		error = set_dumper(&kd.di, devtoname(dev), td);
		if (error == 0)
			dev->si_flags |= SI_DUMPDEV;
	}
	return (error);
}

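/*
 * init_dumpdev()
 *
 * Called from g_dev_taste().  If the "dumpdev" environment variable names
 * this device (with or without the "/dev/" prefix), configure it as the
 * dump device.
 */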
static int
init_dumpdev(struct cdev *dev)
{
	struct g_consumer *cp;
	const char *devprefix = "/dev/", *devname;
	int error;
	size_t len;

	if (dumpdev == NULL)
		return (0);

	len = strlen(devprefix);
	devname = devtoname(dev);
	if (strcmp(devname, dumpdev) != 0 &&
	   (strncmp(dumpdev, devprefix, len) != 0 ||
	    strcmp(devname, dumpdev + len) != 0))
		return (0);

	cp = (struct g_consumer *)dev->si_drv2;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);

	error = g_dev_setdumpdev(dev, curthread);
	if (error == 0) {
		freeenv(dumpdev);
		dumpdev = NULL;
	}

	(void)g_access(cp, -1, 0, 0);

	return (error);
}

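/*
 * g_dev_destroy()
 *
 * Event-queue callback which tears the geom down once the cdev is gone:
 * drop any remaining access counts, detach and destroy the consumer and
 * the geom, and free the softc.
 */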
static void
g_dev_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_dev_softc *sc;
	char buf[SPECNAMELEN + 6];

	g_topology_assert();
	cp = arg;
	gp = cp->geom;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "DESTROY", buf, M_WAITOK);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	mtx_destroy(&sc->sc_mtx);
	g_free(sc);
}

void
g_dev_print(void)
{
	struct g_geom *gp;
	char const *p = "";

	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
		printf("%s%s", p, gp->name);
		p = " ";
	}
	printf("\n");
}

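/*
 * g_dev_attrchanged()
 *
 * Attribute-change notification from the provider.  GEOM::media changes
 * are forwarded to userland via devctl; GEOM::physpath changes create,
 * update or remove the physical-path alias cdev.
 */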
static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{
	struct g_dev_softc *sc;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	sc = cp->private;
	if (strcmp(attr, "GEOM::media") == 0) {
		dev = sc->sc_dev;
		snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
		devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
		devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
		dev = sc->sc_alias;
		if (dev != NULL) {
			snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
			devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf,
			    M_WAITOK);
			devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf,
			    M_WAITOK);
		}
		return;
	}

	if (strcmp(attr, "GEOM::physpath") != 0)
		return;

	if (g_access(cp, 1, 0, 0) == 0) {
		char *physpath;
		int error, physpath_len;

		physpath_len = MAXPATHLEN;
		physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
		error =
		    g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
		g_access(cp, -1, 0, 0);
		if (error == 0 && strlen(physpath) != 0) {
			struct cdev *old_alias_dev;
			struct cdev **alias_devp;

			dev = sc->sc_dev;
			old_alias_dev = sc->sc_alias;
			alias_devp = (struct cdev **)&sc->sc_alias;
			make_dev_physpath_alias(MAKEDEV_WAITOK, alias_devp,
			    dev, old_alias_dev, physpath);
		} else if (sc->sc_alias) {
			destroy_dev((struct cdev *)sc->sc_alias);
			sc->sc_alias = NULL;
		}
		g_free(physpath);
	}
}

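/*
 * g_dev_getprovider()
 *
 * Map a cdev back to the GEOM provider behind it, or return NULL if the
 * cdev does not belong to this class.
 */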
struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
	struct g_consumer *cp;

	g_topology_assert();
	if (dev == NULL)
		return (NULL);
	if (dev->si_devsw != &g_dev_cdevsw)
		return (NULL);
	cp = dev->si_drv2;
	return (cp->provider);
}

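/*
 * g_dev_taste()
 *
 * Called for every provider: create a geom and a consumer, attach to the
 * provider and expose it as a cdev of the same name.  The dump device is
 * configured here if the provider matches the "dumpdev" tunable.
 */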
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s", pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
	cp = g_new_consumer(gp);
	cp->private = sc;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev,
	    &g_dev_cdevsw, NULL, UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name);
	if (error != 0) {
		printf("%s: make_dev_p() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	dev->si_flags |= SI_UNMAPPED;
	sc->sc_dev = dev;

	dev->si_iosize_max = MAXPHYS;
	dev->si_drv2 = cp;
	error = init_dumpdev(dev);
	if (error != 0)
		printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);

	g_dev_attrchanged(cp, "GEOM::physpath");
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK);

	return (gp);
}

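/*
 * g_dev_open()
 *
 * Translate the open flags into access count deltas, request the access
 * from GEOM and track the resulting open count in the softc.  Opens for
 * writing are refused at securelevel 2 or higher.
 */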
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return (ENXIO);		/* g_dev_taste() not done yet */
	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif

	/*
	 * This happens on an attempt to open a device node with O_EXEC.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (error == 0) {
		sc = cp->private;
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_open == 0 && sc->sc_active != 0)
			wakeup(&sc->sc_active);
		sc->sc_open += r + w + e;
		mtx_unlock(&sc->sc_mtx);
	}
	return (error);
}

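/*
 * g_dev_close()
 *
 * Drop the access counts we hold and, if this was the last open, wait for
 * any in-flight bios to drain before releasing the access to GEOM.
 */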
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return (ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif

	/*
	 * vgonel(9) - caused by e.g. a forced unmount of devfs - calls
	 * VOP_CLOSE(9) on the devfs vnode without any FREAD or FWRITE
	 * flags, which would result in zero deltas, which in turn would
	 * cause a panic in g_access(9).
	 *
	 * Note that we cannot zero the counters (i.e. do "r = cp->acr"
	 * etc) instead, because the consumer might be opened in another
	 * devfs instance.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	sc = cp->private;
	mtx_lock(&sc->sc_mtx);
	sc->sc_open += r + w + e;
	while (sc->sc_open == 0 && sc->sc_active != 0)
		msleep(&sc->sc_active, &sc->sc_mtx, 0, "PRIBIO", 0);
	mtx_unlock(&sc->sc_mtx);
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	return (error);
}

/*
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization.  We cannot close it by holding topology
 * XXX: since that would prevent us from doing our job, and stalling events
 * XXX: will break (actually: stall) the BSD disklabel hacks.
 */
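/*
 * g_dev_ioctl()
 *
 * Handle the generic disk ioctls (DIOC*) by querying the consumer's
 * provider; anything we do not recognize is passed to the provider's
 * geom ioctl method, if present.
 */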
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t offset, length, chunk;
	int i, error;

	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		if (*(u_int *)data == 0)
			error = g_dev_setdumpdev(NULL, td);
		else
			error = g_dev_setdumpdev(dev, td);
		break;
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			chunk = length;
			if (g_dev_del_max_sectors != 0 && chunk >
			    g_dev_del_max_sectors * cp->provider->sectorsize) {
				chunk = g_dev_del_max_sectors *
				    cp->provider->sectorsize;
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can likewise be long.  We make this ioctl
			 * interruptible by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = cp->provider->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = cp->provider->stripeoffset;
		break;
	case DIOCGPHYSPATH:
		error = g_io_getattr("GEOM::physpath", cp, &i, data);
		if (error == 0 && *(char *)data == '\0')
			error = ENOENT;
		break;
	case DIOCGATTR: {
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

		if (arg->len > sizeof(arg->value)) {
			error = EINVAL;
			break;
		}
		error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
		break;
	}
	default:
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}

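/*
 * g_dev_done()
 *
 * Completion handler for the cloned bio: propagate error and residual to
 * the original request, and if this was the last outstanding bio on a
 * cdev that has already been destroyed, schedule geom destruction.
 */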
static void
g_dev_done(struct bio *bp2)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	struct bio *bp;
	int destroy;

	cp = bp2->bio_from;
	sc = cp->private;
	bp = bp2->bio_parent;
	bp->bio_error = bp2->bio_error;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	if (bp2->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp2->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
	}
	g_destroy_bio(bp2);
	destroy = 0;
	mtx_lock(&sc->sc_mtx);
	if ((--sc->sc_active) == 0) {
		if (sc->sc_open == 0)
			wakeup(&sc->sc_active);
		if (sc->sc_dev == NULL)
			destroy = 1;
	}
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL);
	biodone(bp);
}

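/*
 * g_dev_strategy()
 *
 * Pass a read, write, delete or flush request down to GEOM by cloning
 * the bio and issuing it on our consumer; the clone completes through
 * g_dev_done().
 */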
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;
	struct g_dev_softc *sc;

	KASSERT(bp->bio_cmd == BIO_READ ||
	        bp->bio_cmd == BIO_WRITE ||
	        bp->bio_cmd == BIO_DELETE ||
		bp->bio_cmd == BIO_FLUSH,
		("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	sc = cp->private;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
#ifdef INVARIANTS
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	mtx_lock(&sc->sc_mtx);
	KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: be deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));

}

/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */

static void
g_dev_callback(void *arg)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int destroy;

	cp = arg;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

	mtx_lock(&sc->sc_mtx);
	sc->sc_dev = NULL;
	sc->sc_alias = NULL;
	destroy = (sc->sc_active == 0);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}

/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in in the interim will be returned immediately.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct cdev *dev;
	struct g_dev_softc *sc;

	g_topology_assert();
	sc = cp->private;
	dev = sc->sc_dev;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		(void)set_dumper(NULL, NULL, curthread);

	/* Destroy the struct cdev * so we get no more requests */
	destroy_dev_sched_cb(dev, g_dev_callback, cp);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);