/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bio.h>
#include <sys/devctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/selinfo.h>
#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

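/*
 * Per-cdev state.  The sc_active word packs a count of in-flight bios
 * in its low 30 bits (SC_A_ACTIVE) together with two flag bits:
 * SC_A_OPEN mirrors whether the node is currently open, SC_A_DESTROY
 * records that devfs destruction has completed and the geom must be
 * torn down once the last bio finishes.
 */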
struct g_dev_softc {
	struct mtx	 sc_mtx;
	struct cdev	*sc_dev;
	struct cdev	*sc_alias;
	int		 sc_open;
	u_int		 sc_active;
	struct selinfo	 sc_selinfo;
#define	SC_A_DESTROY	(1 << 31)
#define	SC_A_OPEN	(1 << 30)
#define	SC_A_ACTIVE	(SC_A_OPEN - 1)
};

static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;
static d_kqfilter_t	g_dev_kqfilter;

static void gdev_filter_detach(struct knote *kn);
static int gdev_filter_vnode(struct knote *kn, long hint);

static const struct filterops gdev_filterops_vnode = {
	.f_isfd = 1,
	.f_detach = gdev_filter_detach,
	.f_event = gdev_filter_vnode,
	.f_copy = knote_triv_copy,
};

static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
	.d_kqfilter =	g_dev_kqfilter,
};

static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;
static g_resize_t g_dev_resize;

static struct g_class g_dev_class = {
	.name = "DEV",
	.version = G_VERSION,
	.init = g_dev_init,
	.fini = g_dev_fini,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
	.attrchanged = g_dev_attrchanged,
	.resize = g_dev_resize
};

/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSDs with a marginal
 * increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider.  Larger requests are chunked "
    "so they can be interrupted.  (0 = disable chunking)");

static char *dumpdev = NULL;
static void
g_dev_init(struct g_class *mp)
{

	dumpdev = kern_getenv("dumpdev");
}

static void
g_dev_fini(struct g_class *mp)
{

	freeenv(dumpdev);
	dumpdev = NULL;
}

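/*
 * Register this device as a kernel dump target: query the provider's
 * dump parameters via the GEOM::kerneldump attribute and hand them to
 * the dumper framework.
 */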
static int
g_dev_setdumpdev(struct cdev *dev, struct diocskerneldump_arg *kda)
{
	struct g_kerneldump kd;
	struct g_consumer *cp;
	int error, len;

	MPASS(dev != NULL && kda != NULL);
	MPASS(kda->kda_index != KDA_REMOVE);

	cp = dev->si_drv2;
	len = sizeof(kd);
	memset(&kd, 0, len);
	kd.offset = 0;
	kd.length = OFF_MAX;
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
	if (error != 0)
		return (error);

	error = dumper_insert(&kd.di, devtoname(dev), kda);
	if (error == 0)
		dev->si_flags |= SI_DUMPDEV;

	return (error);
}

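/*
 * If the "dumpdev" kernel environment variable names this device
 * (with or without the /dev/ prefix), configure it as the dump device
 * at taste time and consume the variable.
 */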
static int
init_dumpdev(struct cdev *dev)
{
	struct diocskerneldump_arg kda;
	struct g_consumer *cp;
	const char *devprefix = _PATH_DEV, *devname;
	int error;
	size_t len;

	bzero(&kda, sizeof(kda));
	kda.kda_index = KDA_APPEND;

	if (dumpdev == NULL)
		return (0);

	len = strlen(devprefix);
	devname = devtoname(dev);
	if (strcmp(devname, dumpdev) != 0 &&
	    (strncmp(dumpdev, devprefix, len) != 0 ||
	    strcmp(devname, dumpdev + len) != 0))
		return (0);

	cp = (struct g_consumer *)dev->si_drv2;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);

	error = g_dev_setdumpdev(dev, &kda);
	if (error == 0) {
		freeenv(dumpdev);
		dumpdev = NULL;
	}

	(void)g_access(cp, -1, 0, 0);

	return (error);
}

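/*
 * Final teardown, run from the GEOM event thread once the cdev is
 * gone and the last outstanding bio has completed: drop any remaining
 * access counts and destroy the consumer, the geom and the softc.
 */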
static void
g_dev_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_dev_softc *sc;
	char buf[SPECNAMELEN + 6];

	g_topology_assert();
	cp = arg;
	gp = cp->geom;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify("GEOM", "DEV", "DESTROY", buf);
	knlist_clear(&sc->sc_selinfo.si_note, 0);
	knlist_destroy(&sc->sc_selinfo.si_note);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	mtx_destroy(&sc->sc_mtx);
	g_free(sc);
}

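/*
 * Print a space-separated list of the names of all DEV geoms, i.e.
 * of all device nodes provided by this class.
 */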
void
g_dev_print(void)
{
	struct g_geom *gp;
	char const *p = "";

	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
		printf("%s%s", p, gp->name);
		p = " ";
	}
	printf("\n");
}

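/*
 * Maintain a /dev alias under the provider's physical path: query the
 * GEOM::physpath attribute and (re)create the alias if a path is
 * reported, or destroy a stale alias if the attribute is now empty.
 */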
static void
g_dev_set_physpath(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	char *physpath;
	int error, physpath_len;

	if (g_access(cp, 1, 0, 0) != 0)
		return;

	sc = cp->private;
	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK | M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	g_access(cp, -1, 0, 0);
	if (error == 0 && strlen(physpath) != 0) {
		struct cdev *dev, *old_alias_dev;
		struct cdev **alias_devp;

		dev = sc->sc_dev;
		old_alias_dev = sc->sc_alias;
		alias_devp = (struct cdev **)&sc->sc_alias;
		make_dev_physpath_alias(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME,
		    alias_devp, dev, old_alias_dev, physpath);
	} else if (sc->sc_alias) {
		destroy_dev((struct cdev *)sc->sc_alias);
		sc->sc_alias = NULL;
	}
	g_free(physpath);
}

static void
g_dev_set_media(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	sc = cp->private;
	dev = sc->sc_dev;
	snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
	devctl_notify("DEVFS", "CDEV", "MEDIACHANGE", buf);
	devctl_notify("GEOM", "DEV", "MEDIACHANGE", buf);
	dev = sc->sc_alias;
	if (dev != NULL) {
		snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
		devctl_notify("DEVFS", "CDEV", "MEDIACHANGE", buf);
		devctl_notify("GEOM", "DEV", "MEDIACHANGE", buf);
	}
}

static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{

	if (strcmp(attr, "GEOM::media") == 0) {
		g_dev_set_media(cp);
		return;
	}

	if (strcmp(attr, "GEOM::physpath") == 0) {
		g_dev_set_physpath(cp);
		return;
	}
}

static void
g_dev_resize(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	char buf[SPECNAMELEN + 6];

	sc = cp->private;
	KNOTE_UNLOCKED(&sc->sc_selinfo.si_note, NOTE_ATTRIB);

	snprintf(buf, sizeof(buf), "cdev=%s", cp->provider->name);
	devctl_notify("GEOM", "DEV", "SIZECHANGE", buf);
}

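/*
 * Map a cdev back to the GEOM provider behind it; returns NULL if the
 * cdev does not belong to this class.
 */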
struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
	struct g_consumer *cp;

	g_topology_assert();
	if (dev == NULL)
		return (NULL);
	if (dev->si_devsw != &g_dev_cdevsw)
		return (NULL);
	cp = dev->si_drv2;
	return (cp->provider);
}

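/*
 * Taste a new provider: create a geom and consumer for it, attach,
 * create the matching /dev node and any alias nodes, set up kqueue
 * notification and announce the new cdev via devctl.
 */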
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_geom_alias *gap;
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error;
	struct cdev *dev, *adev;
	char buf[SPECNAMELEN + 6];
	struct make_dev_args args;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geom(mp, pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
	cp = g_new_consumer(gp);
	cp->private = sc;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error != 0) {
		printf("%s: g_dev_taste(%s) failed to g_attach, error=%d\n",
		    __func__, pp->name, error);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	make_dev_args_init(&args);
	args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
	args.mda_devsw = &g_dev_cdevsw;
	args.mda_cr = NULL;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0640;
	args.mda_si_drv1 = sc;
	args.mda_si_drv2 = cp;
	error = make_dev_s(&args, &sc->sc_dev, "%s", gp->name);
	if (error != 0) {
		printf("%s: make_dev_s() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	dev = sc->sc_dev;
	dev->si_flags |= SI_UNMAPPED;
	dev->si_iosize_max = maxphys;
	knlist_init_mtx(&sc->sc_selinfo.si_note, &sc->sc_mtx);
	error = init_dumpdev(dev);
	if (error != 0)
		printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);

	g_dev_attrchanged(cp, "GEOM::physpath");
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify("GEOM", "DEV", "CREATE", buf);
	/*
	 * Now add all the aliases for this drive.
	 */
	LIST_FOREACH(gap, &pp->aliases, ga_next) {
		error = make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &adev, dev, "%s", gap->ga_alias);
		if (error) {
			printf("%s: make_dev_alias_p() failed (name=%s, error=%d)\n",
			    __func__, gap->ga_alias, error);
			continue;
		}
		snprintf(buf, sizeof(buf), "cdev=%s", gap->ga_alias);
		devctl_notify("GEOM", "DEV", "CREATE", buf);
	}

	return (gp);
}

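/*
 * Open the device node.  FREAD/FWRITE are translated into read/write
 * access deltas for g_access(); sc_open and the SC_A_OPEN flag are
 * kept in sync for the I/O drain logic on close.
 */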
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif

	/*
	 * This happens on attempt to open a device node with O_EXEC.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (error == 0) {
		sc = dev->si_drv1;
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
			wakeup(&sc->sc_active);
		sc->sc_open += r + w + e;
		if (sc->sc_open == 0)
			atomic_clear_int(&sc->sc_active, SC_A_OPEN);
		else
			atomic_set_int(&sc->sc_active, SC_A_OPEN);
		KNOTE_LOCKED(&sc->sc_selinfo.si_note, NOTE_OPEN);
		mtx_unlock(&sc->sc_mtx);
	}
	return (error);
}

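/*
 * Close the device node.  On last close, wait until all in-flight
 * bios have drained (polling every hz/10 ticks) before dropping the
 * access counts, so the provider is not closed with I/O outstanding.
 */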
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif

	/*
	 * vgonel(9) - caused by e.g. a forced unmount of devfs - calls
	 * VOP_CLOSE(9) on the devfs vnode without any FREAD or FWRITE
	 * flags, which would result in zero deltas, which in turn would
	 * cause a panic in g_access(9).
	 *
	 * Note that we cannot zero the counters (i.e. do "r = cp->acr"
	 * etc) instead, because the consumer might be opened in another
	 * devfs instance.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	sc = dev->si_drv1;
	mtx_lock(&sc->sc_mtx);
	sc->sc_open += r + w + e;
	if (sc->sc_open == 0)
		atomic_clear_int(&sc->sc_active, SC_A_OPEN);
	else
		atomic_set_int(&sc->sc_active, SC_A_OPEN);
	while (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
		msleep(&sc->sc_active, &sc->sc_mtx, 0, "g_dev_close", hz / 10);
	KNOTE_LOCKED(&sc->sc_selinfo.si_note,
	    NOTE_CLOSE | (w ? NOTE_CLOSE_WRITE : 0));
	mtx_unlock(&sc->sc_mtx);
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	return (error);
}

static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t offset, length, chunk, odd;
	int i, error;

	cp = dev->si_drv2;
	pp = cp->provider;

	/* If consumer or provider is dying, don't disturb. */
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);
	if (pp->error)
		return (pp->error);

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = pp->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = pp->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCSKERNELDUMP:
	    {
		struct diocskerneldump_arg *kda;
		uint8_t *encryptedkey;

		kda = (struct diocskerneldump_arg *)data;
		if (kda->kda_index == KDA_REMOVE_ALL ||
		    kda->kda_index == KDA_REMOVE_DEV ||
		    kda->kda_index == KDA_REMOVE) {
			error = dumper_remove(devtoname(dev), kda);
			explicit_bzero(kda, sizeof(*kda));
			break;
		}

		if (kda->kda_encryption != KERNELDUMP_ENC_NONE) {
			if (kda->kda_encryptedkeysize == 0 ||
			    kda->kda_encryptedkeysize >
			    KERNELDUMP_ENCKEY_MAX_SIZE) {
				explicit_bzero(kda, sizeof(*kda));
				return (EINVAL);
			}
			encryptedkey = malloc(kda->kda_encryptedkeysize, M_TEMP,
			    M_WAITOK);
			error = copyin(kda->kda_encryptedkey, encryptedkey,
			    kda->kda_encryptedkeysize);
		} else {
			encryptedkey = NULL;
		}
		if (error == 0) {
			kda->kda_encryptedkey = encryptedkey;
			error = g_dev_setdumpdev(dev, kda);
		}
		zfree(encryptedkey, M_TEMP);
		explicit_bzero(kda, sizeof(*kda));
		break;
	    }
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % pp->sectorsize) != 0 ||
		    (length % pp->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			chunk = length;
			if (g_dev_del_max_sectors != 0 &&
			    chunk > g_dev_del_max_sectors * pp->sectorsize) {
				chunk = g_dev_del_max_sectors * pp->sectorsize;
				if (pp->stripesize > 0) {
					odd = (offset + chunk +
					    pp->stripeoffset) % pp->stripesize;
					if (chunk > odd)
						chunk -= odd;
				}
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can be as well.  We make this ioctl
			 * interruptible by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = pp->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = pp->stripeoffset;
		break;
	case DIOCGPHYSPATH:
		error = g_io_getattr("GEOM::physpath", cp, &i, data);
		if (error == 0 && *(char *)data == '\0')
			error = ENOENT;
		break;
	case DIOCGATTR: {
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

		if (arg->len > sizeof(arg->value)) {
			error = EINVAL;
			break;
		}
		error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
		break;
	}
	case DIOCZONECMD: {
		struct disk_zone_args *zone_args =
		    (struct disk_zone_args *)data;
		struct disk_zone_rep_entry *new_entries, *old_entries;
		struct disk_zone_report *rep;
		size_t alloc_size;

		old_entries = NULL;
		new_entries = NULL;
		rep = NULL;
		alloc_size = 0;

		if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) {
			rep = &zone_args->zone_params.report;
#define	MAXENTRIES	(maxphys / sizeof(struct disk_zone_rep_entry))
			if (rep->entries_allocated > MAXENTRIES)
				rep->entries_allocated = MAXENTRIES;
			alloc_size = rep->entries_allocated *
			    sizeof(struct disk_zone_rep_entry);
			if (alloc_size != 0)
				new_entries = g_malloc(alloc_size,
				    M_WAITOK | M_ZERO);
			old_entries = rep->entries;
			rep->entries = new_entries;
		}
		error = g_io_zonecmd(zone_args, cp);
		if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES &&
		    alloc_size != 0 && error == 0)
			error = copyout(new_entries, old_entries, alloc_size);
		if (old_entries != NULL && rep != NULL)
			rep->entries = old_entries;
		g_free(new_entries);
		break;
	}
	default:
		if (pp->geom->ioctl != NULL) {
			error = pp->geom->ioctl(pp, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}

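/*
 * Completion handler for cloned bios: propagate status back to the
 * original bio, post read/write kevents and drop the in-flight count.
 * When the last bio drains, wake up a sleeping g_dev_close() and, if
 * the cdev is already destroyed, schedule the final geom teardown.
 */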
static void
g_dev_done(struct bio *bp2)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	struct bio *bp;
	int active;

	cp = bp2->bio_from;
	sc = cp->private;
	bp = bp2->bio_parent;
	bp->bio_error = bp2->bio_error;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	if (bp2->bio_cmd == BIO_ZONE)
		bcopy(&bp2->bio_zone, &bp->bio_zone, sizeof(bp->bio_zone));

	if (bp2->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp2->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		if (bp->bio_cmd == BIO_READ)
			KNOTE_UNLOCKED(&sc->sc_selinfo.si_note, NOTE_READ);
		if (bp->bio_cmd == BIO_WRITE)
			KNOTE_UNLOCKED(&sc->sc_selinfo.si_note, NOTE_WRITE);
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
	}
	g_destroy_bio(bp2);
	active = atomic_fetchadd_int(&sc->sc_active, -1) - 1;
	if ((active & SC_A_ACTIVE) == 0) {
		if ((active & SC_A_OPEN) == 0)
			wakeup(&sc->sc_active);
		if (active & SC_A_DESTROY)
			g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL);
	}
	biodone(bp);
}

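/*
 * Entry point for I/O on the device node: account the bio as
 * in-flight, clone it (retrying until the allocation succeeds) and
 * hand the clone down to the consumer.
 */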
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;
	struct g_dev_softc *sc;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE ||
	    bp->bio_cmd == BIO_FLUSH ||
	    bp->bio_cmd == BIO_ZONE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
	biotrack(bp, __func__);
#ifdef INVARIANTS
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	sc = dev->si_drv1;
	KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
	atomic_add_int(&sc->sc_active, 1);

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: deadlock safely, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));
}

/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */

static void
g_dev_callback(void *arg)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int active;

	cp = arg;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

	sc->sc_dev = NULL;
	sc->sc_alias = NULL;
	active = atomic_fetchadd_int(&sc->sc_active, SC_A_DESTROY);
	if ((active & SC_A_ACTIVE) == 0)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}

/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in the interim will be returned immediately.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct cdev *dev;
	struct g_dev_softc *sc;

	g_topology_assert();
	sc = cp->private;
	dev = sc->sc_dev;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

	if (dev == NULL)
		return;

	/* Reset any dump-area set on this device. */
	if (dev->si_flags & SI_DUMPDEV) {
		struct diocskerneldump_arg kda;

		bzero(&kda, sizeof(kda));
		kda.kda_index = KDA_REMOVE_DEV;
		(void)dumper_remove(devtoname(dev), &kda);
	}

	/* Destroy the struct cdev so that we get no more requests. */
	delist_dev(dev);
	destroy_dev_sched_cb(dev, g_dev_callback, cp);
}

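/*
 * kqueue support: EVFILT_VNODE-style notes (open/close, read/write
 * activity, attribute changes) are posted on the per-device knote
 * list, which is protected by sc_mtx.
 */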
static void
gdev_filter_detach(struct knote *kn)
{
	struct g_dev_softc *sc;

	sc = kn->kn_hook;

	knlist_remove(&sc->sc_selinfo.si_note, kn, 0);
}

static int
gdev_filter_vnode(struct knote *kn, long hint)
{
	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}

static int
g_dev_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct g_dev_softc *sc;

	sc = dev->si_drv1;

	if (kn->kn_filter != EVFILT_VNODE)
		return (EINVAL);

#define	SUPPORTED_EVENTS	(NOTE_ATTRIB | NOTE_OPEN | NOTE_CLOSE | \
    NOTE_CLOSE_WRITE | NOTE_READ | NOTE_WRITE)
	if (kn->kn_sfflags & ~SUPPORTED_EVENTS)
		return (EOPNOTSUPP);

	kn->kn_fop = &gdev_filterops_vnode;
	kn->kn_hook = sc;
	knlist_add(&sc->sc_selinfo.si_note, kn, 0);

	return (0);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);