/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_geom.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/ctype.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/devicestat.h>
#include <machine/md_var.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#include <dev/led/led.h>

#include <machine/bus.h>

struct g_disk_softc {
	struct mtx		 done_mtx;
	struct disk		*dp;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	char			led[64];
	uint32_t		state;
	struct mtx		 start_mtx;
};

static g_access_t g_disk_access;
static g_start_t g_disk_start;
static g_ioctl_t g_disk_ioctl;
static g_dumpconf_t g_disk_dumpconf;
static g_provgone_t g_disk_providergone;

static int g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS);

static struct g_class g_disk_class = {
	.name = G_DISK_CLASS_NAME,
	.version = G_VERSION,
	.start = g_disk_start,
	.access = g_disk_access,
	.ioctl = g_disk_ioctl,
	.providergone = g_disk_providergone,
	.dumpconf = g_disk_dumpconf,
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0,
    "GEOM_DISK stuff");

DECLARE_GEOM_CLASS(g_disk_class, g_disk);

static int
g_disk_access(struct g_provider *pp, int r, int w, int e)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)",
	    pp->name, r, w, e);
	g_topology_assert();
	sc = pp->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		/*
		 * Allow decreasing access count even if disk is not
		 * available anymore.
		 */
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	error = 0;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		if (dp->d_open != NULL) {
			error = dp->d_open(dp);
			if (bootverbose && error != 0)
				printf("Opened disk %s -> %d\n",
				    pp->name, error);
			if (error != 0)
				return (error);
		}
		pp->sectorsize = dp->d_sectorsize;
		if (dp->d_maxsize == 0) {
			printf("WARNING: Disk drive %s%d has no d_maxsize\n",
			    dp->d_name, dp->d_unit);
			dp->d_maxsize = DFLTPHYS;
		}
		if (dp->d_delmaxsize == 0) {
			if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) {
				printf("WARNING: Disk drive %s%d has no "
				    "d_delmaxsize\n", dp->d_name, dp->d_unit);
			}
			dp->d_delmaxsize = dp->d_maxsize;
		}
		pp->stripeoffset = dp->d_stripeoffset;
		pp->stripesize = dp->d_stripesize;
		dp->d_flags |= DISKFLAG_OPEN;
		/*
		 * Do not invoke the resize event when the initial size was
		 * zero; some disks report their size only after the first
		 * open.
		 */
		if (pp->mediasize == 0)
			pp->mediasize = dp->d_mediasize;
		else
			g_resize_provider(pp, dp->d_mediasize);
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		if (dp->d_close != NULL) {
			error = dp->d_close(dp);
			if (error != 0)
				printf("Closed disk %s -> %d\n",
				    pp->name, error);
		}
		sc->state = G_STATE_ACTIVE;
		if (sc->led[0] != 0)
			led_set(sc->led, "0");
		dp->d_flags &= ~DISKFLAG_OPEN;
	}
	return (error);
}

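/*
 * Handle the GEOM::kerneldump attribute: point the kernel dump descriptor
 * at this disk's dump routine and parameters, clipping the requested
 * extent to the media size.
 */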
static void
g_disk_kerneldump(struct bio *bp, struct disk *dp)
{
	struct g_kerneldump *gkd;
	struct g_geom *gp;

	gkd = (struct g_kerneldump*)bp->bio_data;
	gp = bp->bio_to->geom;
	g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)",
		gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	if (dp->d_dump == NULL) {
		g_io_deliver(bp, ENODEV);
		return;
	}
	gkd->di.dumper = dp->d_dump;
	gkd->di.priv = dp;
	gkd->di.blocksize = dp->d_sectorsize;
	gkd->di.maxiosize = dp->d_maxsize;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > dp->d_mediasize)
		gkd->length = dp->d_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}

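/*
 * Handle the GEOM::setstate attribute: record the administrative state
 * and, if an LED name has been configured via the "led" sysctl, map the
 * state to a led(4) pattern (solid on for FAILED, flashing for REBUILD
 * and RESYNC, off otherwise).
 */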
static void
g_disk_setstate(struct bio *bp, struct g_disk_softc *sc)
{
	const char *cmd;

	memcpy(&sc->state, bp->bio_data, sizeof(sc->state));
	if (sc->led[0] != 0) {
		switch (sc->state) {
		case G_STATE_FAILED:
			cmd = "1";
			break;
		case G_STATE_REBUILD:
			cmd = "f5";
			break;
		case G_STATE_RESYNC:
			cmd = "f1";
			break;
		default:
			cmd = "0";
			break;
		}
		led_set(sc->led, cmd);
	}
	g_io_deliver(bp, 0);
}

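/*
 * Completion handler for the bios cloned off a request in g_disk_start().
 * Completed bytes and the first error are accumulated into the parent
 * under done_mtx; once all children have reported back, the parent bio
 * is delivered with the combined result.
 */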
static void
g_disk_done(struct bio *bp)
{
	struct bintime now;
	struct bio *bp2;
	struct g_disk_softc *sc;

	/* See "notes" for why we need a mutex here */
	/* XXX: will witness accept a mix of Giant/unGiant drivers here ? */
	bp2 = bp->bio_parent;
	sc = bp2->bio_to->private;
	bp->bio_completed = bp->bio_length - bp->bio_resid;
	binuptime(&now);
	mtx_lock(&sc->done_mtx);
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_completed;

	switch (bp->bio_cmd) {
	case BIO_ZONE:
		bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone));
		/*FALLTHROUGH*/
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		devstat_end_transaction_bio_bt(sc->dp->d_devstat, bp, &now);
		break;
	default:
		break;
	}
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed) {
		mtx_unlock(&sc->done_mtx);
		bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed;
		g_io_deliver(bp2, bp2->bio_error);
	} else
		mtx_unlock(&sc->done_mtx);
	g_destroy_bio(bp);
}

static int
g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct thread *td)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	sc = pp->private;
	dp = sc->dp;

	if (dp->d_ioctl == NULL)
		return (ENOIOCTL);
	error = dp->d_ioctl(dp, cmd, data, fflag, td);
	return (error);
}

static off_t
g_disk_maxsize(struct disk *dp, struct bio *bp)
{
	if (bp->bio_cmd == BIO_DELETE)
		return (dp->d_delmaxsize);
	return (dp->d_maxsize);
}

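/*
 * Worst-case number of pages (and hence S/G segments) that a transfer
 * of the disk's maximum size can span when it is not page aligned.
 */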
static int
g_disk_maxsegs(struct disk *dp, struct bio *bp)
{
	return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
}

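/*
 * Advance a cloned bio so that it describes the portion of the parent
 * request starting 'off' bytes further in.  The data pointer is moved
 * according to how the bio carries its data: a bus_dma S/G list
 * (BIO_VLIST), unmapped VM pages (BIO_UNMAPPED) or a mapped buffer.
 */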
static void
g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
{

	bp->bio_offset += off;
	bp->bio_length -= off;

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *seg, *end;

		seg = (bus_dma_segment_t *)bp->bio_data;
		end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
		off += bp->bio_ma_offset;
		while (off >= seg->ds_len) {
			KASSERT((seg != end),
			    ("vlist request runs off the end"));
			off -= seg->ds_len;
			seg++;
		}
		bp->bio_ma_offset = off;
		bp->bio_ma_n = end - seg;
		bp->bio_data = (void *)seg;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma += off / PAGE_SIZE;
		bp->bio_ma_offset += off;
		bp->bio_ma_offset %= PAGE_SIZE;
		bp->bio_ma_n -= off / PAGE_SIZE;
	} else {
		bp->bio_data += off;
	}
}

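/*
 * Clip a single S/G segment against the remaining length and page budget:
 * *plength is reduced by the bytes taken from this segment, *ppages by
 * the pages those bytes span, and *poffset is zeroed because any
 * following segment is consumed from its start.
 */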
static void
g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
    off_t *plength, int *ppages)
{
	uintptr_t seg_page_base;
	uintptr_t seg_page_end;
	off_t offset;
	off_t length;
	int seg_pages;

	offset = *poffset;
	length = *plength;

	if (length > seg->ds_len - offset)
		length = seg->ds_len - offset;

	seg_page_base = trunc_page(seg->ds_addr + offset);
	seg_page_end  = round_page(seg->ds_addr + offset + length);
	seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;

	if (seg_pages > *ppages) {
		seg_pages = *ppages;
		length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
		    (seg->ds_addr + offset);
	}

	*poffset = 0;
	*plength -= length;
	*ppages -= seg_pages;
}

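/*
 * Walk a BIO_VLIST request and work out how much of it fits within the
 * disk's segment budget.  Returns the residual byte count that does not
 * fit (zero if everything fits) and, via *pendseg, a pointer just past
 * the last segment used.
 */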
static off_t
g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
{
	bus_dma_segment_t *seg, *end;
	off_t residual;
	off_t offset;
	int pages;

	seg = (bus_dma_segment_t *)bp->bio_data;
	end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
	residual = bp->bio_length;
	offset = bp->bio_ma_offset;
	pages = g_disk_maxsegs(dp, bp);
	while (residual != 0 && pages != 0) {
		KASSERT((seg != end),
		    ("vlist limit runs off the end"));
		g_disk_seg_limit(seg, &offset, &residual, &pages);
		seg++;
	}
	if (pendseg != NULL)
		*pendseg = seg;
	return (residual);
}

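/*
 * Trim a cloned bio to the disk's transfer limits (d_maxsize or
 * d_delmaxsize, and the segment budget for vlist bios).  Returns true
 * if the bio was shortened, in which case the caller must issue more
 * clones to cover the remainder.
 */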
static bool
g_disk_limit(struct disk *dp, struct bio *bp)
{
	bool limited = false;
	off_t maxsz;

	maxsz = g_disk_maxsize(dp, bp);

	/*
	 * XXX: If we have a stripesize we should really use it here.
	 *      Care should be taken in the delete case if this is done
	 *      as deletes can be very sensitive to size given how they
	 *      are processed.
	 */
	if (bp->bio_length > maxsz) {
		bp->bio_length = maxsz;
		limited = true;
	}

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *firstseg, *endseg;
		off_t residual;

		firstseg = (bus_dma_segment_t*)bp->bio_data;
		residual = g_disk_vlist_limit(dp, bp, &endseg);
		if (residual != 0) {
			bp->bio_ma_n = endseg - firstseg;
			bp->bio_length -= residual;
			limited = true;
		}
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma_n =
		    howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
	}

	return (limited);
}

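/*
 * GEOM start method: hand an incoming bio to the disk driver.  Reads,
 * writes and deletes are cloned and, when they exceed the disk's limits,
 * split into several children completed via g_disk_done(); GETATTR is
 * mostly answered inline; FLUSH and ZONE are passed down as one clone.
 */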
static void
g_disk_start(struct bio *bp)
{
	struct bio *bp2, *bp3;
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;
	off_t off;

	biotrack(bp, __func__);

	sc = bp->bio_to->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	error = EJUSTRETURN;
	switch(bp->bio_cmd) {
	case BIO_DELETE:
		if (!(dp->d_flags & DISKFLAG_CANDELETE)) {
			error = EOPNOTSUPP;
			break;
		}
		/* fall-through */
	case BIO_READ:
	case BIO_WRITE:
		KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
		    (bp->bio_flags & BIO_UNMAPPED) == 0,
		    ("unmapped bio not supported by disk %s", dp->d_name));
		off = 0;
		bp3 = NULL;
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			error = ENOMEM;
			break;
		}
		for (;;) {
			if (g_disk_limit(dp, bp2)) {
				off += bp2->bio_length;

				/*
				 * To avoid a race, we need to grab the next bio
				 * before we schedule this one.  See "notes".
				 */
				bp3 = g_clone_bio(bp);
				if (bp3 == NULL)
					bp->bio_error = ENOMEM;
			}
			bp2->bio_done = g_disk_done;
			bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
			bp2->bio_bcount = bp2->bio_length;
			bp2->bio_disk = dp;
			mtx_lock(&sc->start_mtx);
			devstat_start_transaction_bio(dp->d_devstat, bp2);
			mtx_unlock(&sc->start_mtx);
			dp->d_strategy(bp2);

			if (bp3 == NULL)
				break;

			bp2 = bp3;
			bp3 = NULL;
			g_disk_advance(dp, bp2, off);
		}
		break;
	case BIO_GETATTR:
		/* Give the driver a chance to override */
		if (dp->d_getattr != NULL) {
			if (bp->bio_disk == NULL)
				bp->bio_disk = dp;
			error = dp->d_getattr(bp);
			if (error != -1)
				break;
			error = EJUSTRETURN;
		}
		if (g_handleattr_int(bp, "GEOM::candelete",
		    (dp->d_flags & DISKFLAG_CANDELETE) != 0))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwsectors",
		    dp->d_fwsectors))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads))
			break;
		else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0))
			break;
		else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident))
			break;
		else if (g_handleattr_str(bp, "GEOM::descr", dp->d_descr))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor",
		    dp->d_hba_vendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_device",
		    dp->d_hba_device))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor",
		    dp->d_hba_subvendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice",
		    dp->d_hba_subdevice))
			break;
		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_disk_kerneldump(bp, dp);
		else if (!strcmp(bp->bio_attribute, "GEOM::setstate"))
			g_disk_setstate(bp, sc);
		else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate",
		    dp->d_rotation_rate))
			break;
		else
			error = ENOIOCTL;
		break;
	case BIO_FLUSH:
		g_trace(G_T_BIO, "g_disk_flushcache(%s)",
		    bp->bio_to->name);
		if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) {
			error = EOPNOTSUPP;
			break;
		}
		/*FALLTHROUGH*/
	case BIO_ZONE:
		if (bp->bio_cmd == BIO_ZONE) {
			if (!(dp->d_flags & DISKFLAG_CANZONE)) {
				error = EOPNOTSUPP;
				break;
			}
			g_trace(G_T_BIO, "g_disk_zone(%s)",
			    bp->bio_to->name);
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_disk = dp;
		mtx_lock(&sc->start_mtx);
		devstat_start_transaction_bio(dp->d_devstat, bp2);
		mtx_unlock(&sc->start_mtx);
		dp->d_strategy(bp2);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	if (error != EJUSTRETURN)
		g_io_deliver(bp, error);
	return;
}

static void
g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp)
{
	struct bio *bp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char *buf;
	int res = 0;

	sc = gp->softc;
	if (sc == NULL || (dp = sc->dp) == NULL)
		return;
	if (indent == NULL) {
		sbuf_printf(sb, " hd %u", dp->d_fwheads);
		sbuf_printf(sb, " sc %u", dp->d_fwsectors);
		return;
	}
	if (pp != NULL) {
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n",
		    indent, dp->d_fwheads);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n",
		    indent, dp->d_fwsectors);

		/*
		 * "rotationrate" is a little complicated, because the value
		 * returned by the drive might not be the RPM; 0 and 1 are
		 * special cases, and there's also a valid range.
		 */
		sbuf_printf(sb, "%s<rotationrate>", indent);
		if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */
			sbuf_printf(sb, "unknown");	/* don't report RPM. */
		else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING)
			sbuf_printf(sb, "0");
		else if ((dp->d_rotation_rate >= DISK_RR_MIN) &&
		    (dp->d_rotation_rate <= DISK_RR_MAX))
			sbuf_printf(sb, "%u", dp->d_rotation_rate);
		else
			sbuf_printf(sb, "invalid");
		sbuf_printf(sb, "</rotationrate>\n");
		if (dp->d_getattr != NULL) {
			buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK);
			bp = g_alloc_bio();
			bp->bio_disk = dp;
			bp->bio_attribute = "GEOM::ident";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			res = dp->d_getattr(bp);
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s",
			    res == 0 ? buf: dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
			bp->bio_attribute = "GEOM::lunid";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunid>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunid>\n");
			}
			bp->bio_attribute = "GEOM::lunname";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunname>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunname>\n");
			}
			g_destroy_bio(bp);
			g_free(buf);
		} else {
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s", dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
		}
		sbuf_printf(sb, "%s<descr>", indent);
		g_conf_printf_escaped(sb, "%s", dp->d_descr);
		sbuf_printf(sb, "</descr>\n");
	}
}

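/*
 * Event handler queued by disk_resize().  Providers whose sector size no
 * longer matches the disk are withered; the others are resized to the
 * new media size.
 */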
static void
g_disk_resize(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_provider *pp;

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();

	dp = ptr;
	gp = dp->d_geom;

	if (dp->d_destroyed || gp == NULL)
		return;

	LIST_FOREACH(pp, &gp->provider, provider) {
		if (pp->sectorsize != 0 &&
		    pp->sectorsize != dp->d_sectorsize)
			g_wither_provider(pp, ENXIO);
		else
			g_resize_provider(pp, dp->d_mediasize);
	}
}

static void
g_disk_create(void *arg, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct disk *dp;
	struct g_disk_softc *sc;
	struct disk_alias *dap;
	char tmpstr[80];

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();
	dp = arg;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_START;

	/*
	 * If the disk has already gone away, we can just stop here and
	 * call the user's callback to tell him we've cleaned things up.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		if (dp->d_gone != NULL)
			dp->d_gone(dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF);
	mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
	sc->dp = dp;
	gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
	gp->softc = sc;
	LIST_FOREACH(dap, &dp->d_aliases, da_next) {
		snprintf(tmpstr, sizeof(tmpstr), "%s%d", dap->da_alias, dp->d_unit);
		g_geom_add_alias(gp, tmpstr);
	}
	pp = g_new_providerf(gp, "%s", gp->name);
	devstat_remove_entry(pp->stat);
	pp->stat = NULL;
	dp->d_devstat->id = pp;
	pp->mediasize = dp->d_mediasize;
	pp->sectorsize = dp->d_sectorsize;
	pp->stripeoffset = dp->d_stripeoffset;
	pp->stripesize = dp->d_stripesize;
	if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
	if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0)
		pp->flags |= G_PF_DIRECT_SEND;
	pp->flags |= G_PF_DIRECT_RECEIVE;
	if (bootverbose)
		printf("GEOM: new disk %s\n", gp->name);
	sysctl_ctx_init(&sc->sysctl_ctx);
	snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name,
		CTLFLAG_RD, 0, tmpstr);
	if (sc->sysctl_tree != NULL) {
		SYSCTL_ADD_STRING(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led",
		    CTLFLAG_RWTUN, sc->led, sizeof(sc->led),
		    "LED name");
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "flags",
		    CTLTYPE_STRING | CTLFLAG_RD, dp, 0, g_disk_sysctl_flags,
		    "A", "Report disk flags");
	}
	pp->private = sc;
	dp->d_geom = gp;
	g_error_provider(pp, 0);

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_DONE;

	/*
	 * If the disk has gone away at this stage, start the withering
	 * process for it.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		g_wither_provider(pp, ENXIO);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

}

/*
 * We get this callback after all of the consumers have gone away, and just
 * before the provider is freed.  If the disk driver provided a d_gone
 * callback, let them know that it is okay to free resources -- they won't
 * be getting any more accesses from GEOM.
 */
static void
g_disk_providergone(struct g_provider *pp)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = (struct g_disk_softc *)pp->private;
	dp = sc->dp;
	if (dp != NULL && dp->d_gone != NULL)
		dp->d_gone(dp);
	if (sc->sysctl_tree != NULL) {
		sysctl_ctx_free(&sc->sysctl_ctx);
		sc->sysctl_tree = NULL;
	}
	if (sc->led[0] != 0) {
		led_set(sc->led, "0");
		sc->led[0] = 0;
	}
	pp->private = NULL;
	pp->geom->softc = NULL;
	mtx_destroy(&sc->done_mtx);
	mtx_destroy(&sc->start_mtx);
	g_free(sc);
}

static void
g_disk_destroy(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_disk_softc *sc;
	struct disk_alias *dap, *daptmp;

	g_topology_assert();
	dp = ptr;
	gp = dp->d_geom;
	if (gp != NULL) {
		sc = gp->softc;
		if (sc != NULL)
			sc->dp = NULL;
		dp->d_geom = NULL;
		g_wither_geom(gp, ENXIO);
	}
	LIST_FOREACH_SAFE(dap, &dp->d_aliases, da_next, daptmp)
		g_free(dap);

	g_free(dp);
}

/*
 * We only allow printable characters in the disk ident; the rest are
 * converted to 'x<HH>'.
 */
static void
g_disk_ident_adjust(char *ident, size_t size)
{
	char *p, tmp[4], newid[DISK_IDENT_SIZE];

	newid[0] = '\0';
	for (p = ident; *p != '\0'; p++) {
		if (isprint(*p)) {
			tmp[0] = *p;
			tmp[1] = '\0';
		} else {
			snprintf(tmp, sizeof(tmp), "x%02hhx",
			    *(unsigned char *)p);
		}
		if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid))
			break;
	}
	bzero(ident, size);
	strlcpy(ident, newid, size);
}

struct disk *
disk_alloc(void)
{
	struct disk *dp;

	dp = g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO);
	LIST_INIT(&dp->d_aliases);
	return (dp);
}

void
disk_create(struct disk *dp, int version)
{

	if (version != DISK_VERSION) {
		printf("WARNING: Attempt to add disk %s%d %s",
		    dp->d_name, dp->d_unit,
		    " using incompatible ABI version of disk(9)\n");
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	if (dp->d_flags & DISKFLAG_RESERVED) {
		printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n",
		    dp->d_name, dp->d_unit);
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy"));
	KASSERT(dp->d_name != NULL, ("disk_create need d_name"));
	KASSERT(*dp->d_name != 0, ("disk_create need d_name"));
	KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long"));
	if (dp->d_devstat == NULL)
		dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit,
		    dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
		    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	dp->d_geom = NULL;

	dp->d_init_level = DISK_INIT_NONE;

	g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident));
	g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL);
}

void
disk_destroy(struct disk *dp)
{

	g_cancel_event(dp);
	dp->d_destroyed = 1;
	if (dp->d_devstat != NULL)
		devstat_remove_entry(dp->d_devstat);
	g_post_event(g_disk_destroy, dp, M_WAITOK, NULL);
}

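/*
 * Register an alternate name for the disk.  The string is copied into the
 * same allocation, right behind the struct disk_alias; when the geom is
 * created, each alias (with the unit number appended) is added to it.
 */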
void
disk_add_alias(struct disk *dp, const char *name)
{
	struct disk_alias *dap;

	dap = (struct disk_alias *)g_malloc(
		sizeof(struct disk_alias) + strlen(name) + 1, M_WAITOK);
	strcpy((char *)(dap + 1), name);
	dap->da_alias = (const char *)(dap + 1);
	LIST_INSERT_HEAD(&dp->d_aliases, dap, da_next);
}

void
disk_gone(struct disk *dp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_goneflag = 1;

	/*
	 * If we're still in the process of creating this disk (the
	 * g_disk_create() function is still queued, or is in
	 * progress), the init level will not yet be DISK_INIT_DONE.
	 *
	 * If that is the case, g_disk_create() will see d_goneflag
	 * and take care of cleaning things up.
	 *
	 * If the disk has already been created, we default to
	 * withering the provider as usual below.
	 *
	 * If the caller has not set a d_gone() callback, he will
	 * not be any worse off by returning here, because the geom
	 * has not been fully set up in any case.
	 */
	if (dp->d_init_level < DISK_INIT_DONE) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_wither_provider(pp, ENXIO);
		}
	}
}

void
disk_attr_changed(struct disk *dp, const char *attr, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char devnamebuf[128];

	gp = dp->d_geom;
	if (gp != NULL)
		LIST_FOREACH(pp, &gp->provider, provider)
			(void)g_attr_changed(pp, attr, flag);
	snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name,
	    dp->d_unit);
	devctl_notify("GEOM", "disk", attr, devnamebuf);
}

void
disk_media_changed(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_changed(pp, flag);
		}
	}
}

void
disk_media_gone(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_gone(pp, flag);
		}
	}
}

int
disk_resize(struct disk *dp, int flag)
{

	if (dp->d_destroyed || dp->d_geom == NULL)
		return (0);

	return (g_post_event(g_disk_resize, dp, flag, NULL));
}

static void
g_kern_disks(void *p, int flag __unused)
{
	struct sbuf *sb;
	struct g_geom *gp;
	char *sp;

	sb = p;
	sp = "";
	g_topology_assert();
	LIST_FOREACH(gp, &g_disk_class.geom, geom) {
		sbuf_printf(sb, "%s%s", sp, gp->name);
		sp = " ";
	}
	sbuf_finish(sb);
}

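/*
 * Sysctl handler that decodes d_flags with the "%b" bitmask format: the
 * leading "\20" selects hexadecimal output and each "\<bit>NAME" pair
 * names a 1-based bit position given in octal, e.g. "\2OPEN" for 0x2.
 */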
static int
g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS)
{
	struct disk *dp;
	struct sbuf *sb;
	int error;

	sb = sbuf_new_auto();
	dp = (struct disk *)arg1;
	sbuf_printf(sb, "%b", dp->d_flags,
		"\20"
		"\2OPEN"
		"\3CANDELETE"
		"\4CANFLUSHCACHE"
		"\5UNMAPPEDBIO"
		"\6DIRECTCOMPLETION"
		"\10CANZONE");

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

static int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct sbuf *sb;

	sb = sbuf_new_auto();
	g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, disks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_disks, "A", "names of available disks");