/* xref: /freebsd/sys/geom/geom_dev.c (revision 6829dae12bb055451fa467da4589c43bd03b1e64) */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

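/*
 * Per-cdev softc.  sc_active packs three things into one word so that
 * g_dev_done() can inspect all of them with a single atomic operation:
 * bit 31 (SC_A_DESTROY) marks that devfs destruction has completed,
 * bit 30 (SC_A_OPEN) mirrors "sc_open != 0", and the low 30 bits
 * (SC_A_ACTIVE) count the bios currently in flight below us.
 */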
struct g_dev_softc {
	struct mtx	 sc_mtx;
	struct cdev	*sc_dev;
	struct cdev	*sc_alias;
	int		 sc_open;
	u_int		 sc_active;
#define	SC_A_DESTROY	(1 << 31)
#define	SC_A_OPEN	(1 << 30)
#define	SC_A_ACTIVE	(SC_A_OPEN - 1)
};

static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;

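/*
 * Character device entry points.  Reads and writes go through the generic
 * physread()/physwrite() helpers, which turn them into bios and hand them
 * to g_dev_strategy().  D_TRACKCLOSE asks devfs to call g_dev_close() for
 * every close, not just the last one, so the deltas passed to g_access()
 * stay balanced.
 */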
static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;

static struct g_class g_dev_class	= {
	.name = "DEV",
	.version = G_VERSION,
	.init = g_dev_init,
	.fini = g_dev_fini,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
	.attrchanged = g_dev_attrchanged
};

/*
 * We target 262144 (8 x 32768) sectors per delete request by default, as
 * this significantly increases throughput on commonly used SSDs at the
 * cost of a marginal increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");
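/*
 * Worked example: with the default of 262144 sectors and a 512-byte
 * sector size, each BIO_DELETE issued from DIOCGDELETE is capped at
 * 262144 * 512 = 128 MiB.  The limit can be tuned at runtime, e.g.
 * "sysctl kern.geom.dev.delete_max_sectors=65536".
 */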

static char *dumpdev = NULL;
static void
g_dev_init(struct g_class *mp)
{

	dumpdev = kern_getenv("dumpdev");
}

static void
g_dev_fini(struct g_class *mp)
{

	freeenv(dumpdev);
	dumpdev = NULL;
}

static int
g_dev_setdumpdev(struct cdev *dev, struct diocskerneldump_arg *kda,
    struct thread *td)
{
	struct g_kerneldump kd;
	struct g_consumer *cp;
	int error, len;

	if (dev == NULL || kda == NULL)
		return (clear_dumper(td));

	cp = dev->si_drv2;
	len = sizeof(kd);
	memset(&kd, 0, len);
	kd.offset = 0;
	kd.length = OFF_MAX;
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
	if (error != 0)
		return (error);

	error = set_dumper(&kd.di, devtoname(dev), td, kda->kda_compression,
	    kda->kda_encryption, kda->kda_key, kda->kda_encryptedkeysize,
	    kda->kda_encryptedkey);
	if (error == 0)
		dev->si_flags |= SI_DUMPDEV;

	return (error);
}

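/*
 * Honor the "dumpdev" environment variable (typically set via loader.conf,
 * e.g. dumpdev="/dev/ada0p3") when the matching cdev shows up.  Both the
 * bare provider name and the /dev/-prefixed form are accepted.
 */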
static int
init_dumpdev(struct cdev *dev)
{
	struct diocskerneldump_arg kda;
	struct g_consumer *cp;
	const char *devprefix = "/dev/", *devname;
	int error;
	size_t len;

	bzero(&kda, sizeof(kda));
	kda.kda_enable = 1;

	if (dumpdev == NULL)
		return (0);

	len = strlen(devprefix);
	devname = devtoname(dev);
	if (strcmp(devname, dumpdev) != 0 &&
	   (strncmp(dumpdev, devprefix, len) != 0 ||
	    strcmp(devname, dumpdev + len) != 0))
		return (0);

	cp = (struct g_consumer *)dev->si_drv2;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);

	error = g_dev_setdumpdev(dev, &kda, curthread);
	if (error == 0) {
		freeenv(dumpdev);
		dumpdev = NULL;
	}

	(void)g_access(cp, -1, 0, 0);

	return (error);
}

static void
g_dev_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_dev_softc *sc;
	char buf[SPECNAMELEN + 6];

	g_topology_assert();
	cp = arg;
	gp = cp->geom;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "DESTROY", buf, M_WAITOK);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	mtx_destroy(&sc->sc_mtx);
	g_free(sc);
}

void
g_dev_print(void)
{
	struct g_geom *gp;
	char const *p = "";

	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
		printf("%s%s", p, gp->name);
		p = " ";
	}
	printf("\n");
}

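/*
 * React to a GEOM::physpath attribute change: publish (or retire) a /dev
 * alias derived from the provider's physical path, giving the device a
 * stable name independent of, e.g., controller renumbering.
 */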
static void
g_dev_set_physpath(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	char *physpath;
	int error, physpath_len;

	if (g_access(cp, 1, 0, 0) != 0)
		return;

	sc = cp->private;
	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	g_access(cp, -1, 0, 0);
	if (error == 0 && strlen(physpath) != 0) {
		struct cdev *dev, *old_alias_dev;
		struct cdev **alias_devp;

		dev = sc->sc_dev;
		old_alias_dev = sc->sc_alias;
		alias_devp = (struct cdev **)&sc->sc_alias;
		make_dev_physpath_alias(MAKEDEV_WAITOK, alias_devp, dev,
		    old_alias_dev, physpath);
	} else if (sc->sc_alias) {
		destroy_dev((struct cdev *)sc->sc_alias);
		sc->sc_alias = NULL;
	}
	g_free(physpath);
}

static void
g_dev_set_media(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	sc = cp->private;
	dev = sc->sc_dev;
	snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
	devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
	devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
	dev = sc->sc_alias;
	if (dev != NULL) {
		snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
		devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
		devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
	}
}

static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{

	if (strcmp(attr, "GEOM::media") == 0) {
		g_dev_set_media(cp);
		return;
	}

	if (strcmp(attr, "GEOM::physpath") == 0) {
		g_dev_set_physpath(cp);
		return;
	}
}

struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
	struct g_consumer *cp;

	g_topology_assert();
	if (dev == NULL)
		return (NULL);
	if (dev->si_devsw != &g_dev_cdevsw)
		return (NULL);
	cp = dev->si_drv2;
	return (cp->provider);
}

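/*
 * Taste a new provider: create a geom and consumer, attach, and expose the
 * provider as a node under /dev, followed by one alias node per name
 * recorded on the underlying geom.
 */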
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_geom_alias *gap;
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error;
	struct cdev *dev, *adev;
	char buf[SPECNAMELEN + 6];

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s", pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
	cp = g_new_consumer(gp);
	cp->private = sc;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev,
	    &g_dev_cdevsw, NULL, UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name);
	if (error != 0) {
		printf("%s: make_dev_p() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	dev->si_flags |= SI_UNMAPPED;
	sc->sc_dev = dev;

	dev->si_iosize_max = MAXPHYS;
	dev->si_drv2 = cp;
	error = init_dumpdev(dev);
	if (error != 0)
		printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);

	g_dev_attrchanged(cp, "GEOM::physpath");
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK);
	/*
	 * Now add all the aliases for this drive
	 */
	LIST_FOREACH(gap, &pp->geom->aliases, ga_next) {
		error = make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &adev, dev, "%s", gap->ga_alias);
		if (error) {
			printf("%s: make_dev_alias_p() failed (name=%s, error=%d)\n",
			    __func__, gap->ga_alias, error);
			continue;
		}
		snprintf(buf, sizeof(buf), "cdev=%s", gap->ga_alias);
		devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK);
	}

	return (gp);
}

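/*
 * Open translates the FREAD/FWRITE bits into +1 deltas on the consumer's
 * (read, write, exclusive) access counts via g_access(); close applies
 * the matching -1 deltas.  sc_open tracks the sum, so the strategy and
 * completion paths can tell whether the device is still open.
 */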
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return (ENXIO);		/* g_dev_taste() not done yet */
	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif

	/*
	 * This happens on attempt to open a device node with O_EXEC.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (error == 0) {
		sc = cp->private;
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
			wakeup(&sc->sc_active);
		sc->sc_open += r + w + e;
		if (sc->sc_open == 0)
			atomic_clear_int(&sc->sc_active, SC_A_OPEN);
		else
			atomic_set_int(&sc->sc_active, SC_A_OPEN);
		mtx_unlock(&sc->sc_mtx);
	}
	return (error);
}

static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return (ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif

	/*
	 * vgonel(9), triggered by e.g. a forced unmount of devfs, calls
	 * VOP_CLOSE(9) on the devfs vnode without any FREAD or FWRITE
	 * flags.  That would result in zero deltas, which in turn would
	 * cause a panic in g_access(9).
	 *
	 * Note that we cannot zero the counters (i.e. do "r = cp->acr"
	 * etc.) instead, because the consumer might be opened in another
	 * devfs instance.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	sc = cp->private;
	mtx_lock(&sc->sc_mtx);
	sc->sc_open += r + w + e;
	if (sc->sc_open == 0)
		atomic_clear_int(&sc->sc_active, SC_A_OPEN);
	else
		atomic_set_int(&sc->sc_active, SC_A_OPEN);
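	/*
	 * On last close, wait for any in-flight bios to drain;
	 * g_dev_done() posts a wakeup on sc_active once the count of
	 * active requests reaches zero while the device is closed.
	 */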
	while (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
		msleep(&sc->sc_active, &sc->sc_mtx, 0, "g_dev_close", hz / 10);
	mtx_unlock(&sc->sc_mtx);
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	return (error);
}

/*
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization.  We cannot close the race by holding the
 * XXX: topology lock, since that would prevent us from doing our job, and
 * XXX: stalling events will break (actually: stall) the BSD disklabel hacks.
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t offset, length, chunk, odd;
	int i, error;

	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
#ifdef COMPAT_FREEBSD11
	case DIOCSKERNELDUMP_FREEBSD11:
	    {
		struct diocskerneldump_arg kda;

		bzero(&kda, sizeof(kda));
		kda.kda_encryption = KERNELDUMP_ENC_NONE;
		kda.kda_enable = (uint8_t)*(u_int *)data;
		if (kda.kda_enable == 0)
			error = g_dev_setdumpdev(NULL, NULL, td);
		else
			error = g_dev_setdumpdev(dev, &kda, td);
		break;
	    }
#endif
	case DIOCSKERNELDUMP:
	    {
		struct diocskerneldump_arg *kda;
		uint8_t *encryptedkey;

		kda = (struct diocskerneldump_arg *)data;
		if (kda->kda_enable == 0) {
			error = g_dev_setdumpdev(NULL, NULL, td);
			break;
		}

		if (kda->kda_encryption != KERNELDUMP_ENC_NONE) {
			if (kda->kda_encryptedkeysize <= 0 ||
			    kda->kda_encryptedkeysize >
			    KERNELDUMP_ENCKEY_MAX_SIZE) {
				return (EINVAL);
			}
			encryptedkey = malloc(kda->kda_encryptedkeysize, M_TEMP,
			    M_WAITOK);
			error = copyin(kda->kda_encryptedkey, encryptedkey,
			    kda->kda_encryptedkeysize);
		} else {
			encryptedkey = NULL;
		}
		if (error == 0) {
			kda->kda_encryptedkey = encryptedkey;
			error = g_dev_setdumpdev(dev, kda, td);
		}
		if (encryptedkey != NULL) {
			explicit_bzero(encryptedkey, kda->kda_encryptedkeysize);
			free(encryptedkey, M_TEMP);
		}
		explicit_bzero(kda, sizeof(*kda));
		break;
	    }
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		if ((cp->provider->mediasize > 0) &&
		    (offset >= cp->provider->mediasize)) {
			/*
			 * Catch out-of-bounds requests here. The problem is
			 * that due to historical GEOM I/O implementation
			 * peculiarities, g_delete_data() would always return
			 * success for requests starting just one byte past
			 * the provider's media boundary. We only check when
			 * the media size is non-zero, since a request against
			 * a zero-sized provider would (most likely) fail with
			 * ENXIO instead.
			 */
			error = EIO;
			break;
		}
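		/*
		 * Issue the delete in chunks of at most
		 * g_dev_del_max_sectors sectors, trimming each chunk down
		 * to end on a stripe boundary when the provider reports
		 * one, so that an oversized request remains interruptible.
		 */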
		while (length > 0) {
			chunk = length;
			if (g_dev_del_max_sectors != 0 && chunk >
			    g_dev_del_max_sectors * cp->provider->sectorsize) {
				chunk = g_dev_del_max_sectors *
				    cp->provider->sectorsize;
				if (cp->provider->stripesize > 0) {
					odd = (offset + chunk +
					    cp->provider->stripeoffset) %
					    cp->provider->stripesize;
					if (chunk > odd)
						chunk -= odd;
				}
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can be large as well.  We make this ioctl
			 * interruptible by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = cp->provider->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = cp->provider->stripeoffset;
		break;
	case DIOCGPHYSPATH:
		error = g_io_getattr("GEOM::physpath", cp, &i, data);
		if (error == 0 && *(char *)data == '\0')
			error = ENOENT;
		break;
	case DIOCGATTR: {
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

		if (arg->len > sizeof(arg->value)) {
			error = EINVAL;
			break;
		}
		error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
		break;
	}
	case DIOCZONECMD: {
		struct disk_zone_args *zone_args = (struct disk_zone_args *)data;
		struct disk_zone_rep_entry *new_entries, *old_entries;
		struct disk_zone_report *rep;
		size_t alloc_size;

		old_entries = NULL;
		new_entries = NULL;
		rep = NULL;
		alloc_size = 0;

		if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) {
			rep = &zone_args->zone_params.report;
#define	MAXENTRIES	(MAXPHYS / sizeof(struct disk_zone_rep_entry))
			if (rep->entries_allocated > MAXENTRIES)
				rep->entries_allocated = MAXENTRIES;
			alloc_size = rep->entries_allocated *
			    sizeof(struct disk_zone_rep_entry);
			if (alloc_size != 0)
				new_entries = g_malloc(alloc_size,
				    M_WAITOK | M_ZERO);
			old_entries = rep->entries;
			rep->entries = new_entries;
		}
		error = g_io_zonecmd(zone_args, cp);
		if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES &&
		    alloc_size != 0 && error == 0)
			error = copyout(new_entries, old_entries, alloc_size);
		if (old_entries != NULL && rep != NULL)
			rep->entries = old_entries;
		if (new_entries != NULL)
			g_free(new_entries);
		break;
	}
	default:
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd,
			    data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}

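/*
 * I/O completion for requests cloned in g_dev_strategy().  Error, byte
 * count, and (for BIO_ZONE) zone results are propagated to the parent bio
 * before it is completed.  The sc_active decrement below pairs with the
 * increment in g_dev_strategy(); when the last request drains, wake up a
 * sleeping g_dev_close() or schedule the deferred geom destruction
 * requested by g_dev_callback().
 */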
static void
g_dev_done(struct bio *bp2)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	struct bio *bp;
	int active;

	cp = bp2->bio_from;
	sc = cp->private;
	bp = bp2->bio_parent;
	bp->bio_error = bp2->bio_error;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	if (bp2->bio_cmd == BIO_ZONE)
		bcopy(&bp2->bio_zone, &bp->bio_zone, sizeof(bp->bio_zone));

	if (bp2->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp2->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
	}
	g_destroy_bio(bp2);
	active = atomic_fetchadd_int(&sc->sc_active, -1) - 1;
	if ((active & SC_A_ACTIVE) == 0) {
		if ((active & SC_A_OPEN) == 0)
			wakeup(&sc->sc_active);
		if (active & SC_A_DESTROY)
			g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL);
	}
	biodone(bp);
}

static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;
	struct g_dev_softc *sc;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE ||
	    bp->bio_cmd == BIO_FLUSH ||
	    bp->bio_cmd == BIO_ZONE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	sc = cp->private;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
	biotrack(bp, __func__);
#ifdef INVARIANTS
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
	atomic_add_int(&sc->sc_active, 1);

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: be deadlock-safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));
}

/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */

static void
g_dev_callback(void *arg)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int active;

	cp = arg;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

	sc->sc_dev = NULL;
	sc->sc_alias = NULL;
	active = atomic_fetchadd_int(&sc->sc_active, SC_A_DESTROY);
	if ((active & SC_A_ACTIVE) == 0)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}

/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in the interim will be returned immediately.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct cdev *dev;
	struct g_dev_softc *sc;

	g_topology_assert();
	sc = cp->private;
	dev = sc->sc_dev;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		(void)clear_dumper(curthread);

	/* Destroy the struct cdev so we get no more requests */
	destroy_dev_sched_cb(dev, g_dev_callback, cp);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);