xref: /freebsd/sys/geom/geom_disk.c (revision ca987d4641cdcd7f27e153db17c5bf064934faf5)
1 /*-
2  * Copyright (c) 2002 Poul-Henning Kamp
3  * Copyright (c) 2002 Networks Associates Technology, Inc.
4  * All rights reserved.
5  *
6  * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7  * and NAI Labs, the Security Research Division of Network Associates, Inc.
8  * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9  * DARPA CHATS research program.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The names of the authors may not be used to endorse or promote
20  *    products derived from this software without specific prior written
21  *    permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_geom.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/sysctl.h>
45 #include <sys/bio.h>
46 #include <sys/bus.h>
47 #include <sys/ctype.h>
48 #include <sys/fcntl.h>
49 #include <sys/malloc.h>
50 #include <sys/sbuf.h>
51 #include <sys/devicestat.h>
52 #include <machine/md_var.h>
53 
54 #include <sys/lock.h>
55 #include <sys/mutex.h>
56 #include <geom/geom.h>
57 #include <geom/geom_disk.h>
58 #include <geom/geom_int.h>
59 
60 #include <dev/led/led.h>
61 
62 #include <machine/bus.h>
63 
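/*
 * Per-disk glue between a GEOM provider and the driver's struct disk.
 * A short description of the fields below, based on how this file uses
 * them: dp points back at the driver-owned disk, start_mtx serializes
 * devstat transaction starts in g_disk_start(), done_mtx serializes
 * completion accounting for cloned bios in g_disk_done(), and led/state
 * back the per-disk "led" sysctl and the GEOM::setstate handling.
 */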
64 struct g_disk_softc {
65 	struct mtx		 done_mtx;
66 	struct disk		*dp;
67 	struct sysctl_ctx_list	sysctl_ctx;
68 	struct sysctl_oid	*sysctl_tree;
69 	char			led[64];
70 	uint32_t		state;
71 	struct mtx		 start_mtx;
72 };
73 
74 static g_access_t g_disk_access;
75 static g_start_t g_disk_start;
76 static g_ioctl_t g_disk_ioctl;
77 static g_dumpconf_t g_disk_dumpconf;
78 static g_provgone_t g_disk_providergone;
79 
80 static int g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS);
81 
82 static struct g_class g_disk_class = {
83 	.name = G_DISK_CLASS_NAME,
84 	.version = G_VERSION,
85 	.start = g_disk_start,
86 	.access = g_disk_access,
87 	.ioctl = g_disk_ioctl,
88 	.providergone = g_disk_providergone,
89 	.dumpconf = g_disk_dumpconf,
90 };
91 
92 SYSCTL_DECL(_kern_geom);
93 static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0,
94     "GEOM_DISK configuration");
95 
96 DECLARE_GEOM_CLASS(g_disk_class, g_disk);
97 
98 static int
99 g_disk_access(struct g_provider *pp, int r, int w, int e)
100 {
101 	struct disk *dp;
102 	struct g_disk_softc *sc;
103 	int error;
104 
105 	g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)",
106 	    pp->name, r, w, e);
107 	g_topology_assert();
108 	sc = pp->private;
109 	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
110 		/*
111 		 * Allow decreasing access count even if disk is not
112 		 * available anymore.
113 		 */
114 		if (r <= 0 && w <= 0 && e <= 0)
115 			return (0);
116 		return (ENXIO);
117 	}
118 	r += pp->acr;
119 	w += pp->acw;
120 	e += pp->ace;
121 	error = 0;
122 	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
123 		if (dp->d_open != NULL) {
124 			error = dp->d_open(dp);
125 			if (bootverbose && error != 0)
126 				printf("Opened disk %s -> %d\n",
127 				    pp->name, error);
128 			if (error != 0)
129 				return (error);
130 		}
131 		pp->sectorsize = dp->d_sectorsize;
132 		if (dp->d_maxsize == 0) {
133 			printf("WARNING: Disk drive %s%d has no d_maxsize\n",
134 			    dp->d_name, dp->d_unit);
135 			dp->d_maxsize = DFLTPHYS;
136 		}
137 		if (dp->d_delmaxsize == 0) {
138 			if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) {
139 				printf("WARNING: Disk drive %s%d has no "
140 				    "d_delmaxsize\n", dp->d_name, dp->d_unit);
141 			}
142 			dp->d_delmaxsize = dp->d_maxsize;
143 		}
144 		pp->stripeoffset = dp->d_stripeoffset;
145 		pp->stripesize = dp->d_stripesize;
146 		dp->d_flags |= DISKFLAG_OPEN;
147 		/*
148 		 * Do not invoke a resize event when the initial size was zero;
149 		 * some disks report their size only after the first open.
150 		 */
151 		if (pp->mediasize == 0)
152 			pp->mediasize = dp->d_mediasize;
153 		else
154 			g_resize_provider(pp, dp->d_mediasize);
155 	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
156 		if (dp->d_close != NULL) {
157 			error = dp->d_close(dp);
158 			if (error != 0)
159 				printf("Closed disk %s -> %d\n",
160 				    pp->name, error);
161 		}
162 		sc->state = G_STATE_ACTIVE;
163 		if (sc->led[0] != 0)
164 			led_set(sc->led, "0");
165 		dp->d_flags &= ~DISKFLAG_OPEN;
166 	}
167 	return (error);
168 }
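/*
 * The access method above follows the usual GEOM convention: r, w and e
 * are deltas and pp->acr/acw/ace hold the current totals, so d_open()
 * runs only on the 0 -> >0 transition and d_close() only on the
 * >0 -> 0 transition.  A minimal sketch of how a consumer reaches this
 * code (g_access() on a consumer attached to a disk provider):
 *
 *	error = g_access(cp, 1, 1, 1);	 // first opener: d_open() is called
 *	...
 *	(void)g_access(cp, -1, -1, -1);	 // last closer: d_close() is called
 */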
169 
170 static void
171 g_disk_kerneldump(struct bio *bp, struct disk *dp)
172 {
173 	struct g_kerneldump *gkd;
174 	struct g_geom *gp;
175 
176 	gkd = (struct g_kerneldump*)bp->bio_data;
177 	gp = bp->bio_to->geom;
178 	g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)",
179 		gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
180 	if (dp->d_dump == NULL) {
181 		g_io_deliver(bp, ENODEV);
182 		return;
183 	}
184 	gkd->di.dumper = dp->d_dump;
185 	gkd->di.priv = dp;
186 	gkd->di.blocksize = dp->d_sectorsize;
187 	gkd->di.maxiosize = dp->d_maxsize;
188 	gkd->di.mediaoffset = gkd->offset;
189 	if ((gkd->offset + gkd->length) > dp->d_mediasize)
190 		gkd->length = dp->d_mediasize - gkd->offset;
191 	gkd->di.mediasize = gkd->length;
192 	g_io_deliver(bp, 0);
193 }
194 
195 static void
196 g_disk_setstate(struct bio *bp, struct g_disk_softc *sc)
197 {
198 	const char *cmd;
199 
200 	memcpy(&sc->state, bp->bio_data, sizeof(sc->state));
201 	if (sc->led[0] != 0) {
202 		switch (sc->state) {
203 		case G_STATE_FAILED:
204 			cmd = "1";
205 			break;
206 		case G_STATE_REBUILD:
207 			cmd = "f5";
208 			break;
209 		case G_STATE_RESYNC:
210 			cmd = "f1";
211 			break;
212 		default:
213 			cmd = "0";
214 			break;
215 		}
216 		led_set(sc->led, cmd);
217 	}
218 	g_io_deliver(bp, 0);
219 }
220 
221 static void
222 g_disk_done(struct bio *bp)
223 {
224 	struct bintime now;
225 	struct bio *bp2;
226 	struct g_disk_softc *sc;
227 
228 	/* See "notes" for why we need a mutex here */
229 	/* XXX: will WITNESS accept a mix of Giant/non-Giant drivers here? */
230 	bp2 = bp->bio_parent;
231 	sc = bp2->bio_to->private;
232 	bp->bio_completed = bp->bio_length - bp->bio_resid;
233 	binuptime(&now);
234 	mtx_lock(&sc->done_mtx);
235 	if (bp2->bio_error == 0)
236 		bp2->bio_error = bp->bio_error;
237 	bp2->bio_completed += bp->bio_completed;
238 
239 	switch (bp->bio_cmd) {
240 	case BIO_ZONE:
241 		bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone));
242 		/*FALLTHROUGH*/
243 	case BIO_READ:
244 	case BIO_WRITE:
245 	case BIO_DELETE:
246 	case BIO_FLUSH:
247 		devstat_end_transaction_bio_bt(sc->dp->d_devstat, bp, &now);
248 		break;
249 	default:
250 		break;
251 	}
252 	bp2->bio_inbed++;
253 	if (bp2->bio_children == bp2->bio_inbed) {
254 		mtx_unlock(&sc->done_mtx);
255 		bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed;
256 		g_io_deliver(bp2, bp2->bio_error);
257 	} else
258 		mtx_unlock(&sc->done_mtx);
259 	g_destroy_bio(bp);
260 }
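/*
 * g_disk_done() runs once for every cloned child bio.  Completion state
 * is folded back into the parent under done_mtx: the first non-zero
 * error is kept, completed byte counts accumulate, and the parent is
 * only delivered once bio_inbed catches up with bio_children, i.e. when
 * the last child issued from g_disk_start() has finished.
 */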
261 
262 static int
263 g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct thread *td)
264 {
265 	struct disk *dp;
266 	struct g_disk_softc *sc;
267 	int error;
268 
269 	sc = pp->private;
270 	dp = sc->dp;
271 
272 	if (dp->d_ioctl == NULL)
273 		return (ENOIOCTL);
274 	error = dp->d_ioctl(dp, cmd, data, fflag, td);
275 	return (error);
276 }
277 
278 static off_t
279 g_disk_maxsize(struct disk *dp, struct bio *bp)
280 {
281 	if (bp->bio_cmd == BIO_DELETE)
282 		return (dp->d_delmaxsize);
283 	return (dp->d_maxsize);
284 }
285 
286 static int
287 g_disk_maxsegs(struct disk *dp, struct bio *bp)
288 {
289 	return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
290 }
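/*
 * The "+ 1" above allows for a transfer that is not page aligned: a
 * request of g_disk_maxsize() bytes can straddle one more page than
 * maxsize / PAGE_SIZE when it starts part way into a page.
 */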
291 
292 static void
293 g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
294 {
295 
296 	bp->bio_offset += off;
297 	bp->bio_length -= off;
298 
299 	if ((bp->bio_flags & BIO_VLIST) != 0) {
300 		bus_dma_segment_t *seg, *end;
301 
302 		seg = (bus_dma_segment_t *)bp->bio_data;
303 		end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
304 		off += bp->bio_ma_offset;
305 		while (off >= seg->ds_len) {
306 			KASSERT((seg != end),
307 			    ("vlist request runs off the end"));
308 			off -= seg->ds_len;
309 			seg++;
310 		}
311 		bp->bio_ma_offset = off;
312 		bp->bio_ma_n = end - seg;
313 		bp->bio_data = (void *)seg;
314 	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
315 		bp->bio_ma += off / PAGE_SIZE;
316 		bp->bio_ma_offset += off;
317 		bp->bio_ma_offset %= PAGE_SIZE;
318 		bp->bio_ma_n -= off / PAGE_SIZE;
319 	} else {
320 		bp->bio_data += off;
321 	}
322 }
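/*
 * g_disk_advance() moves a cloned bio's view of the original request
 * forward by 'off' bytes in whichever form the data takes: a bus_dma
 * segment list (BIO_VLIST), an array of vm_page pointers (BIO_UNMAPPED)
 * or a plain kernel virtual address.
 */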
323 
324 static void
325 g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
326     off_t *plength, int *ppages)
327 {
328 	uintptr_t seg_page_base;
329 	uintptr_t seg_page_end;
330 	off_t offset;
331 	off_t length;
332 	int seg_pages;
333 
334 	offset = *poffset;
335 	length = *plength;
336 
337 	if (length > seg->ds_len - offset)
338 		length = seg->ds_len - offset;
339 
340 	seg_page_base = trunc_page(seg->ds_addr + offset);
341 	seg_page_end  = round_page(seg->ds_addr + offset + length);
342 	seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;
343 
344 	if (seg_pages > *ppages) {
345 		seg_pages = *ppages;
346 		length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
347 		    (seg->ds_addr + offset);
348 	}
349 
350 	*poffset = 0;
351 	*plength -= length;
352 	*ppages -= seg_pages;
353 }
354 
355 static off_t
356 g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
357 {
358 	bus_dma_segment_t *seg, *end;
359 	off_t residual;
360 	off_t offset;
361 	int pages;
362 
363 	seg = (bus_dma_segment_t *)bp->bio_data;
364 	end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
365 	residual = bp->bio_length;
366 	offset = bp->bio_ma_offset;
367 	pages = g_disk_maxsegs(dp, bp);
368 	while (residual != 0 && pages != 0) {
369 		KASSERT((seg != end),
370 		    ("vlist limit runs off the end"));
371 		g_disk_seg_limit(seg, &offset, &residual, &pages);
372 		seg++;
373 	}
374 	if (pendseg != NULL)
375 		*pendseg = seg;
376 	return (residual);
377 }
378 
379 static bool
380 g_disk_limit(struct disk *dp, struct bio *bp)
381 {
382 	bool limited = false;
383 	off_t maxsz;
384 
385 	maxsz = g_disk_maxsize(dp, bp);
386 
387 	/*
388 	 * XXX: If we have a stripesize we should really use it here.
389 	 *      Care should be taken in the delete case if this is done
390 	 *      as deletes can be very sensitive to size given how they
391 	 *      are processed.
392 	 */
393 	if (bp->bio_length > maxsz) {
394 		bp->bio_length = maxsz;
395 		limited = true;
396 	}
397 
398 	if ((bp->bio_flags & BIO_VLIST) != 0) {
399 		bus_dma_segment_t *firstseg, *endseg;
400 		off_t residual;
401 
402 		firstseg = (bus_dma_segment_t*)bp->bio_data;
403 		residual = g_disk_vlist_limit(dp, bp, &endseg);
404 		if (residual != 0) {
405 			bp->bio_ma_n = endseg - firstseg;
406 			bp->bio_length -= residual;
407 			limited = true;
408 		}
409 	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
410 		bp->bio_ma_n =
411 		    howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
412 	}
413 
414 	return (limited);
415 }
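/*
 * Worked example of the limiting above, assuming a hypothetical disk
 * with a d_maxsize of 128 kB: a 320 kB BIO_READ arriving in
 * g_disk_start() is clipped to 128 kB here, so the start routine issues
 * three children of 128 kB, 128 kB and 64 kB, advancing each past the
 * bytes already scheduled and letting g_disk_done() reassemble the
 * result.
 */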
416 
417 static void
418 g_disk_start(struct bio *bp)
419 {
420 	struct bio *bp2, *bp3;
421 	struct disk *dp;
422 	struct g_disk_softc *sc;
423 	int error;
424 	off_t off;
425 
426 	biotrack(bp, __func__);
427 
428 	sc = bp->bio_to->private;
429 	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
430 		g_io_deliver(bp, ENXIO);
431 		return;
432 	}
433 	error = EJUSTRETURN;
434 	switch(bp->bio_cmd) {
435 	case BIO_DELETE:
436 		if (!(dp->d_flags & DISKFLAG_CANDELETE)) {
437 			error = EOPNOTSUPP;
438 			break;
439 		}
440 		/* fall-through */
441 	case BIO_READ:
442 	case BIO_WRITE:
443 		KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
444 		    (bp->bio_flags & BIO_UNMAPPED) == 0,
445 		    ("unmapped bio not supported by disk %s", dp->d_name));
446 		off = 0;
447 		bp3 = NULL;
448 		bp2 = g_clone_bio(bp);
449 		if (bp2 == NULL) {
450 			error = ENOMEM;
451 			break;
452 		}
453 		for (;;) {
454 			if (g_disk_limit(dp, bp2)) {
455 				off += bp2->bio_length;
456 
457 				/*
458 				 * To avoid a race, we need to grab the next bio
459 				 * before we schedule this one.  See "notes".
460 				 */
461 				bp3 = g_clone_bio(bp);
462 				if (bp3 == NULL)
463 					bp->bio_error = ENOMEM;
464 			}
465 			bp2->bio_done = g_disk_done;
466 			bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
467 			bp2->bio_bcount = bp2->bio_length;
468 			bp2->bio_disk = dp;
469 			mtx_lock(&sc->start_mtx);
470 			devstat_start_transaction_bio(dp->d_devstat, bp2);
471 			mtx_unlock(&sc->start_mtx);
472 			dp->d_strategy(bp2);
473 
474 			if (bp3 == NULL)
475 				break;
476 
477 			bp2 = bp3;
478 			bp3 = NULL;
479 			g_disk_advance(dp, bp2, off);
480 		}
481 		break;
482 	case BIO_GETATTR:
483 		/* Give the driver a chance to override */
484 		if (dp->d_getattr != NULL) {
485 			if (bp->bio_disk == NULL)
486 				bp->bio_disk = dp;
487 			error = dp->d_getattr(bp);
488 			if (error != -1)
489 				break;
490 			error = EJUSTRETURN;
491 		}
492 		if (g_handleattr_int(bp, "GEOM::candelete",
493 		    (dp->d_flags & DISKFLAG_CANDELETE) != 0))
494 			break;
495 		else if (g_handleattr_int(bp, "GEOM::fwsectors",
496 		    dp->d_fwsectors))
497 			break;
498 		else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads))
499 			break;
500 		else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0))
501 			break;
502 		else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident))
503 			break;
504 		else if (g_handleattr_str(bp, "GEOM::descr", dp->d_descr))
505 			break;
506 		else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor",
507 		    dp->d_hba_vendor))
508 			break;
509 		else if (g_handleattr_uint16_t(bp, "GEOM::hba_device",
510 		    dp->d_hba_device))
511 			break;
512 		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor",
513 		    dp->d_hba_subvendor))
514 			break;
515 		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice",
516 		    dp->d_hba_subdevice))
517 			break;
518 		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
519 			g_disk_kerneldump(bp, dp);
520 		else if (!strcmp(bp->bio_attribute, "GEOM::setstate"))
521 			g_disk_setstate(bp, sc);
522 		else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate",
523 		    dp->d_rotation_rate))
524 			break;
525 		else
526 			error = ENOIOCTL;
527 		break;
528 	case BIO_FLUSH:
529 		g_trace(G_T_BIO, "g_disk_flushcache(%s)",
530 		    bp->bio_to->name);
531 		if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) {
532 			error = EOPNOTSUPP;
533 			break;
534 		}
535 		/*FALLTHROUGH*/
536 	case BIO_ZONE:
537 		if (bp->bio_cmd == BIO_ZONE) {
538 			if (!(dp->d_flags & DISKFLAG_CANZONE)) {
539 				error = EOPNOTSUPP;
540 				break;
541 			}
542 			g_trace(G_T_BIO, "g_disk_zone(%s)",
543 			    bp->bio_to->name);
544 		}
545 		bp2 = g_clone_bio(bp);
546 		if (bp2 == NULL) {
547 			g_io_deliver(bp, ENOMEM);
548 			return;
549 		}
550 		bp2->bio_done = g_disk_done;
551 		bp2->bio_disk = dp;
552 		mtx_lock(&sc->start_mtx);
553 		devstat_start_transaction_bio(dp->d_devstat, bp2);
554 		mtx_unlock(&sc->start_mtx);
555 		dp->d_strategy(bp2);
556 		break;
557 	default:
558 		error = EOPNOTSUPP;
559 		break;
560 	}
561 	if (error != EJUSTRETURN)
562 		g_io_deliver(bp, error);
563 	return;
564 }
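/*
 * Note the EJUSTRETURN convention in g_disk_start(): any case that has
 * already completed the bio (the g_handleattr_*() helpers deliver it
 * themselves) or handed clones to d_strategy() leaves 'error' alone so
 * the request is not delivered twice; every other outcome is finished
 * right here with g_io_deliver().
 */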
565 
566 static void
567 g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp)
568 {
569 	struct bio *bp;
570 	struct disk *dp;
571 	struct g_disk_softc *sc;
572 	char *buf;
573 	int res = 0;
574 
575 	sc = gp->softc;
576 	if (sc == NULL || (dp = sc->dp) == NULL)
577 		return;
578 	if (indent == NULL) {
579 		sbuf_printf(sb, " hd %u", dp->d_fwheads);
580 		sbuf_printf(sb, " sc %u", dp->d_fwsectors);
581 		return;
582 	}
583 	if (pp != NULL) {
584 		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n",
585 		    indent, dp->d_fwheads);
586 		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n",
587 		    indent, dp->d_fwsectors);
588 
589 		/*
590 		 * "rotationrate" is a little complicated, because the value
591 		 * returned by the drive might not be the RPM; 0 and 1 are
592 		 * special cases, and there's also a valid range.
593 		 */
594 		sbuf_printf(sb, "%s<rotationrate>", indent);
595 		if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */
596 			sbuf_printf(sb, "unknown");	/* don't report RPM. */
597 		else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING)
598 			sbuf_printf(sb, "0");
599 		else if ((dp->d_rotation_rate >= DISK_RR_MIN) &&
600 		    (dp->d_rotation_rate <= DISK_RR_MAX))
601 			sbuf_printf(sb, "%u", dp->d_rotation_rate);
602 		else
603 			sbuf_printf(sb, "invalid");
604 		sbuf_printf(sb, "</rotationrate>\n");
605 		if (dp->d_getattr != NULL) {
606 			buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK);
607 			bp = g_alloc_bio();
608 			bp->bio_disk = dp;
609 			bp->bio_attribute = "GEOM::ident";
610 			bp->bio_length = DISK_IDENT_SIZE;
611 			bp->bio_data = buf;
612 			res = dp->d_getattr(bp);
613 			sbuf_printf(sb, "%s<ident>", indent);
614 			g_conf_printf_escaped(sb, "%s",
615 			    res == 0 ? buf: dp->d_ident);
616 			sbuf_printf(sb, "</ident>\n");
617 			bp->bio_attribute = "GEOM::lunid";
618 			bp->bio_length = DISK_IDENT_SIZE;
619 			bp->bio_data = buf;
620 			if (dp->d_getattr(bp) == 0) {
621 				sbuf_printf(sb, "%s<lunid>", indent);
622 				g_conf_printf_escaped(sb, "%s", buf);
623 				sbuf_printf(sb, "</lunid>\n");
624 			}
625 			bp->bio_attribute = "GEOM::lunname";
626 			bp->bio_length = DISK_IDENT_SIZE;
627 			bp->bio_data = buf;
628 			if (dp->d_getattr(bp) == 0) {
629 				sbuf_printf(sb, "%s<lunname>", indent);
630 				g_conf_printf_escaped(sb, "%s", buf);
631 				sbuf_printf(sb, "</lunname>\n");
632 			}
633 			g_destroy_bio(bp);
634 			g_free(buf);
635 		} else {
636 			sbuf_printf(sb, "%s<ident>", indent);
637 			g_conf_printf_escaped(sb, "%s", dp->d_ident);
638 			sbuf_printf(sb, "</ident>\n");
639 		}
640 		sbuf_printf(sb, "%s<descr>", indent);
641 		g_conf_printf_escaped(sb, "%s", dp->d_descr);
642 		sbuf_printf(sb, "</descr>\n");
643 	}
644 }
645 
646 static void
647 g_disk_resize(void *ptr, int flag)
648 {
649 	struct disk *dp;
650 	struct g_geom *gp;
651 	struct g_provider *pp;
652 
653 	if (flag == EV_CANCEL)
654 		return;
655 	g_topology_assert();
656 
657 	dp = ptr;
658 	gp = dp->d_geom;
659 
660 	if (dp->d_destroyed || gp == NULL)
661 		return;
662 
663 	LIST_FOREACH(pp, &gp->provider, provider) {
664 		if (pp->sectorsize != 0 &&
665 		    pp->sectorsize != dp->d_sectorsize)
666 			g_wither_provider(pp, ENXIO);
667 		else
668 			g_resize_provider(pp, dp->d_mediasize);
669 	}
670 }
671 
672 static void
673 g_disk_create(void *arg, int flag)
674 {
675 	struct g_geom *gp;
676 	struct g_provider *pp;
677 	struct disk *dp;
678 	struct g_disk_softc *sc;
679 	struct disk_alias *dap;
680 	char tmpstr[80];
681 
682 	if (flag == EV_CANCEL)
683 		return;
684 	g_topology_assert();
685 	dp = arg;
686 
687 	mtx_pool_lock(mtxpool_sleep, dp);
688 	dp->d_init_level = DISK_INIT_START;
689 
690 	/*
691 	 * If the disk has already gone away, we can stop here and call
692 	 * the driver's d_gone() callback to report that cleanup is done.
693 	 */
694 	if (dp->d_goneflag != 0) {
695 		mtx_pool_unlock(mtxpool_sleep, dp);
696 		if (dp->d_gone != NULL)
697 			dp->d_gone(dp);
698 		return;
699 	}
700 	mtx_pool_unlock(mtxpool_sleep, dp);
701 
702 	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
703 	mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF);
704 	mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
705 	sc->dp = dp;
706 	gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
707 	gp->softc = sc;
708 	LIST_FOREACH(dap, &dp->d_aliases, da_next) {
709 		snprintf(tmpstr, sizeof(tmpstr), "%s%d", dap->da_alias, dp->d_unit);
710 		g_geom_add_alias(gp, tmpstr);
711 	}
712 	pp = g_new_providerf(gp, "%s", gp->name);
713 	devstat_remove_entry(pp->stat);
714 	pp->stat = NULL;
715 	dp->d_devstat->id = pp;
716 	pp->mediasize = dp->d_mediasize;
717 	pp->sectorsize = dp->d_sectorsize;
718 	pp->stripeoffset = dp->d_stripeoffset;
719 	pp->stripesize = dp->d_stripesize;
720 	if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
721 		pp->flags |= G_PF_ACCEPT_UNMAPPED;
722 	if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0)
723 		pp->flags |= G_PF_DIRECT_SEND;
724 	pp->flags |= G_PF_DIRECT_RECEIVE;
725 	if (bootverbose)
726 		printf("GEOM: new disk %s\n", gp->name);
727 	sysctl_ctx_init(&sc->sysctl_ctx);
728 	snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name);
729 	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
730 		SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name,
731 		CTLFLAG_RD, 0, tmpstr);
732 	if (sc->sysctl_tree != NULL) {
733 		SYSCTL_ADD_STRING(&sc->sysctl_ctx,
734 		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led",
735 		    CTLFLAG_RWTUN, sc->led, sizeof(sc->led),
736 		    "LED name");
737 		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
738 		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "flags",
739 		    CTLTYPE_STRING | CTLFLAG_RD, dp, 0, g_disk_sysctl_flags,
740 		    "A", "Report disk flags");
741 	}
742 	pp->private = sc;
743 	dp->d_geom = gp;
744 	g_error_provider(pp, 0);
745 
746 	mtx_pool_lock(mtxpool_sleep, dp);
747 	dp->d_init_level = DISK_INIT_DONE;
748 
749 	/*
750 	 * If the disk has gone away at this stage, start the withering
751 	 * process for it.
752 	 */
753 	if (dp->d_goneflag != 0) {
754 		mtx_pool_unlock(mtxpool_sleep, dp);
755 		g_wither_provider(pp, ENXIO);
756 		return;
757 	}
758 	mtx_pool_unlock(mtxpool_sleep, dp);
759 
760 }
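/*
 * The two d_goneflag checks in g_disk_create() pair with disk_gone():
 * if the disk disappears before the provider exists we only invoke the
 * driver's d_gone() callback, while a disappearance after the provider
 * has been announced starts the normal withering path instead.  The
 * mtx_pool lock orders these checks against disk_gone() setting the
 * flag and examining d_init_level.
 */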
761 
762 /*
763  * We get this callback after all of the consumers have gone away, and just
764  * before the provider is freed.  If the disk driver provided a d_gone
765  * callback, let them know that it is okay to free resources -- they won't
766  * be getting any more accesses from GEOM.
767  */
768 static void
769 g_disk_providergone(struct g_provider *pp)
770 {
771 	struct disk *dp;
772 	struct g_disk_softc *sc;
773 
774 	sc = (struct g_disk_softc *)pp->private;
775 	dp = sc->dp;
776 	if (dp != NULL && dp->d_gone != NULL)
777 		dp->d_gone(dp);
778 	if (sc->sysctl_tree != NULL) {
779 		sysctl_ctx_free(&sc->sysctl_ctx);
780 		sc->sysctl_tree = NULL;
781 	}
782 	if (sc->led[0] != 0) {
783 		led_set(sc->led, "0");
784 		sc->led[0] = 0;
785 	}
786 	pp->private = NULL;
787 	pp->geom->softc = NULL;
788 	mtx_destroy(&sc->done_mtx);
789 	mtx_destroy(&sc->start_mtx);
790 	g_free(sc);
791 }
792 
793 static void
794 g_disk_destroy(void *ptr, int flag)
795 {
796 	struct disk *dp;
797 	struct g_geom *gp;
798 	struct g_disk_softc *sc;
799 	struct disk_alias *dap, *daptmp;
800 
801 	g_topology_assert();
802 	dp = ptr;
803 	gp = dp->d_geom;
804 	if (gp != NULL) {
805 		sc = gp->softc;
806 		if (sc != NULL)
807 			sc->dp = NULL;
808 		dp->d_geom = NULL;
809 		g_wither_geom(gp, ENXIO);
810 	}
811 	LIST_FOREACH_SAFE(dap, &dp->d_aliases, da_next, daptmp)
812 		g_free(dap);
813 
814 	g_free(dp);
815 }
816 
817 /*
818  * Only printable characters are allowed in the disk ident;
819  * anything else is converted to 'x<HH>'.
820  */
821 static void
822 g_disk_ident_adjust(char *ident, size_t size)
823 {
824 	char *p, tmp[4], newid[DISK_IDENT_SIZE];
825 
826 	newid[0] = '\0';
827 	for (p = ident; *p != '\0'; p++) {
828 		if (isprint(*p)) {
829 			tmp[0] = *p;
830 			tmp[1] = '\0';
831 		} else {
832 			snprintf(tmp, sizeof(tmp), "x%02hhx",
833 			    *(unsigned char *)p);
834 		}
835 		if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid))
836 			break;
837 	}
838 	bzero(ident, size);
839 	strlcpy(ident, newid, size);
840 }
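/*
 * Example of the sanitizing above: a serial number containing a tab,
 * say "WD\tC42", is stored as "WDx09C42", keeping the ident exported
 * through GEOM::ident and the XML config printable.
 */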
841 
842 struct disk *
843 disk_alloc(void)
844 {
845 	struct disk *dp;
846 
847 	dp = g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO);
848 	LIST_INIT(&dp->d_aliases);
849 	return (dp);
850 }
851 
852 void
853 disk_create(struct disk *dp, int version)
854 {
855 
856 	if (version != DISK_VERSION) {
857 		printf("WARNING: Attempt to add disk %s%d "
858 		    "using incompatible ABI version of disk(9)\n",
859 		    dp->d_name, dp->d_unit);
860 		printf("WARNING: Ignoring disk %s%d\n",
861 		    dp->d_name, dp->d_unit);
862 		return;
863 	}
864 	if (dp->d_flags & DISKFLAG_RESERVED) {
865 		printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n",
866 		    dp->d_name, dp->d_unit);
867 		printf("WARNING: Ignoring disk %s%d\n",
868 		    dp->d_name, dp->d_unit);
869 		return;
870 	}
871 	KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy"));
872 	KASSERT(dp->d_name != NULL, ("disk_create need d_name"));
873 	KASSERT(*dp->d_name != 0, ("disk_create need d_name"));
874 	KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long"));
875 	if (dp->d_devstat == NULL)
876 		dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit,
877 		    dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
878 		    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
879 	dp->d_geom = NULL;
880 
881 	dp->d_init_level = DISK_INIT_NONE;
882 
883 	g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident));
884 	g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL);
885 }
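/*
 * Minimal sketch of how a disk(9) driver is expected to use the API
 * above.  The mydisk_* names and the softc layout are hypothetical and
 * only illustrate the calling pattern seen in this file:
 *
 *	static void
 *	mydisk_strategy(struct bio *bp)
 *	{
 *		struct mydisk_softc *sc = bp->bio_disk->d_drv1;
 *
 *		// Perform or queue the transfer described by bio_cmd,
 *		// bio_offset, bio_length and bio_data, then complete it.
 *		bp->bio_resid = 0;
 *		biodone(bp);
 *	}
 *
 *	static void
 *	mydisk_attach(struct mydisk_softc *sc)
 *	{
 *		struct disk *dp;
 *
 *		dp = disk_alloc();
 *		dp->d_name = "mydisk";
 *		dp->d_unit = 0;
 *		dp->d_strategy = mydisk_strategy;
 *		dp->d_sectorsize = 512;
 *		dp->d_mediasize = sc->mediasize;	// bytes
 *		dp->d_maxsize = MAXPHYS;
 *		dp->d_drv1 = sc;
 *		sc->disk = dp;
 *		disk_create(dp, DISK_VERSION);
 *	}
 *
 * On detach the driver typically calls disk_gone() and, once GEOM is
 * finished with the provider (signalled through d_gone() if set),
 * disk_destroy().
 */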
886 
887 void
888 disk_destroy(struct disk *dp)
889 {
890 
891 	g_cancel_event(dp);
892 	dp->d_destroyed = 1;
893 	if (dp->d_devstat != NULL)
894 		devstat_remove_entry(dp->d_devstat);
895 	g_post_event(g_disk_destroy, dp, M_WAITOK, NULL);
896 }
897 
898 void
899 disk_add_alias(struct disk *dp, const char *name)
900 {
901 	struct disk_alias *dap;
902 
903 	dap = (struct disk_alias *)g_malloc(
904 		sizeof(struct disk_alias) + strlen(name) + 1, M_WAITOK);
905 	strcpy((char *)(dap + 1), name);
906 	dap->da_alias = (const char *)(dap + 1);
907 	LIST_INSERT_HEAD(&dp->d_aliases, dap, da_next);
908 }
909 
910 void
911 disk_gone(struct disk *dp)
912 {
913 	struct g_geom *gp;
914 	struct g_provider *pp;
915 
916 	mtx_pool_lock(mtxpool_sleep, dp);
917 	dp->d_goneflag = 1;
918 
919 	/*
920 	 * If we're still in the process of creating this disk (the
921 	 * g_disk_create() function is still queued, or is in
922 	 * progress), the init level will not yet be DISK_INIT_DONE.
923 	 *
924 	 * If that is the case, g_disk_create() will see d_goneflag
925 	 * and take care of cleaning things up.
926 	 *
927 	 * If the disk has already been created, we default to
928 	 * withering the provider as usual below.
929 	 *
930 	 * If the caller has not set a d_gone() callback, they are no
931 	 * worse off for us returning here, because the geom has not
932 	 * been fully set up in any case.
933 	 */
934 	if (dp->d_init_level < DISK_INIT_DONE) {
935 		mtx_pool_unlock(mtxpool_sleep, dp);
936 		return;
937 	}
938 	mtx_pool_unlock(mtxpool_sleep, dp);
939 
940 	gp = dp->d_geom;
941 	if (gp != NULL) {
942 		pp = LIST_FIRST(&gp->provider);
943 		if (pp != NULL) {
944 			KASSERT(LIST_NEXT(pp, provider) == NULL,
945 			    ("geom %p has more than one provider", gp));
946 			g_wither_provider(pp, ENXIO);
947 		}
948 	}
949 }
950 
951 void
952 disk_attr_changed(struct disk *dp, const char *attr, int flag)
953 {
954 	struct g_geom *gp;
955 	struct g_provider *pp;
956 	char devnamebuf[128];
957 
958 	gp = dp->d_geom;
959 	if (gp != NULL)
960 		LIST_FOREACH(pp, &gp->provider, provider)
961 			(void)g_attr_changed(pp, attr, flag);
962 	snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name,
963 	    dp->d_unit);
964 	devctl_notify("GEOM", "disk", attr, devnamebuf);
965 }
966 
967 void
968 disk_media_changed(struct disk *dp, int flag)
969 {
970 	struct g_geom *gp;
971 	struct g_provider *pp;
972 
973 	gp = dp->d_geom;
974 	if (gp != NULL) {
975 		pp = LIST_FIRST(&gp->provider);
976 		if (pp != NULL) {
977 			KASSERT(LIST_NEXT(pp, provider) == NULL,
978 			    ("geom %p has more than one provider", gp));
979 			g_media_changed(pp, flag);
980 		}
981 	}
982 }
983 
984 void
985 disk_media_gone(struct disk *dp, int flag)
986 {
987 	struct g_geom *gp;
988 	struct g_provider *pp;
989 
990 	gp = dp->d_geom;
991 	if (gp != NULL) {
992 		pp = LIST_FIRST(&gp->provider);
993 		if (pp != NULL) {
994 			KASSERT(LIST_NEXT(pp, provider) == NULL,
995 			    ("geom %p has more than one provider", gp));
996 			g_media_gone(pp, flag);
997 		}
998 	}
999 }
1000 
1001 int
1002 disk_resize(struct disk *dp, int flag)
1003 {
1004 
1005 	if (dp->d_destroyed || dp->d_geom == NULL)
1006 		return (0);
1007 
1008 	return (g_post_event(g_disk_resize, dp, flag, NULL));
1009 }
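/*
 * A driver that learns about a capacity change is expected to update
 * d_mediasize first and then notify GEOM, for example (new_size_in_bytes
 * stands for whatever value the driver just learned):
 *
 *	dp->d_mediasize = new_size_in_bytes;
 *	disk_resize(dp, M_NOWAIT);
 *
 * The provider itself is updated later, from the event thread, in
 * g_disk_resize() above.
 */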
1010 
1011 static void
1012 g_kern_disks(void *p, int flag __unused)
1013 {
1014 	struct sbuf *sb;
1015 	struct g_geom *gp;
1016 	char *sp;
1017 
1018 	sb = p;
1019 	sp = "";
1020 	g_topology_assert();
1021 	LIST_FOREACH(gp, &g_disk_class.geom, geom) {
1022 		sbuf_printf(sb, "%s%s", sp, gp->name);
1023 		sp = " ";
1024 	}
1025 	sbuf_finish(sb);
1026 }
1027 
1028 static int
1029 g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS)
1030 {
1031 	struct disk *dp;
1032 	struct sbuf *sb;
1033 	int error;
1034 
1035 	sb = sbuf_new_auto();
1036 	dp = (struct disk *)arg1;
1037 	sbuf_printf(sb, "%b", dp->d_flags,
1038 		"\20"
1039 		"\2OPEN"
1040 		"\3CANDELETE"
1041 		"\4CANFLUSHCACHE"
1042 		"\5UNMAPPEDBIO"
1043 		"\6DIRECTCOMPLETION"
1044 		"\10CANZONE");
1045 
1046 	sbuf_finish(sb);
1047 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
1048 	sbuf_delete(sb);
1049 	return (error);
1050 }
1051 
1052 static int
1053 sysctl_disks(SYSCTL_HANDLER_ARGS)
1054 {
1055 	int error;
1056 	struct sbuf *sb;
1057 
1058 	sb = sbuf_new_auto();
1059 	g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL);
1060 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
1061 	sbuf_delete(sb);
1062 	return error;
1063 }
1064 
1065 SYSCTL_PROC(_kern, OID_AUTO, disks,
1066     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1067     sysctl_disks, "A", "names of available disks");
1068