/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_geom.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/ctype.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/devicestat.h>
#include <machine/md_var.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#include <dev/led/led.h>

#include <machine/bus.h>

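/*
 * Per-disk state kept by this class: the driver's disk(9) structure, its
 * devstat handle, a per-disk sysctl subtree, an optional LED name and
 * state, and the mutex that serializes completion handling in
 * g_disk_done().
 */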
struct g_disk_softc {
	struct disk		*dp;
	struct devstat		*d_devstat;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	char			led[64];
	uint32_t		state;
	struct mtx		 done_mtx;
};

static g_access_t g_disk_access;
static g_start_t g_disk_start;
static g_ioctl_t g_disk_ioctl;
static g_dumpconf_t g_disk_dumpconf;
static g_provgone_t g_disk_providergone;

static int g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS);

static struct g_class g_disk_class = {
	.name = G_DISK_CLASS_NAME,
	.version = G_VERSION,
	.start = g_disk_start,
	.access = g_disk_access,
	.ioctl = g_disk_ioctl,
	.providergone = g_disk_providergone,
	.dumpconf = g_disk_dumpconf,
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0,
    "GEOM_DISK stuff");

DECLARE_GEOM_CLASS(g_disk_class, g_disk);

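/*
 * Track access count changes on the provider.  On the first open, enforce
 * write protection, call the driver's d_open() method and copy the current
 * media parameters into the provider; on the last close, call d_close(),
 * reset the LED and clear the open flag.
 */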
static int
g_disk_access(struct g_provider *pp, int r, int w, int e)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)",
	    pp->name, r, w, e);
	g_topology_assert();
	sc = pp->private;
	if ((dp = sc->dp) == NULL || dp->d_destroyed) {
		/*
		 * Allow decreasing access count even if disk is not
		 * available anymore.
		 */
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	error = 0;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		/*
		 * It would be better to defer this decision to d_open if
		 * it was able to take flags.
		 */
		if (w > 0 && (dp->d_flags & DISKFLAG_WRITE_PROTECT) != 0)
			error = EROFS;
		if (error == 0 && dp->d_open != NULL)
			error = dp->d_open(dp);
		if (bootverbose && error != 0)
			printf("Opened disk %s -> %d\n", pp->name, error);
		if (error != 0)
			return (error);
		pp->sectorsize = dp->d_sectorsize;
		if (dp->d_maxsize == 0) {
			printf("WARNING: Disk drive %s%d has no d_maxsize\n",
			    dp->d_name, dp->d_unit);
			dp->d_maxsize = DFLTPHYS;
		}
		if (dp->d_delmaxsize == 0) {
			if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) {
				printf("WARNING: Disk drive %s%d has no "
				    "d_delmaxsize\n", dp->d_name, dp->d_unit);
			}
			dp->d_delmaxsize = dp->d_maxsize;
		}
		pp->stripeoffset = dp->d_stripeoffset;
		pp->stripesize = dp->d_stripesize;
		dp->d_flags |= DISKFLAG_OPEN;
		/*
		 * Do not invoke resize event when initial size was zero.
		 * Some disks report their size only after first opening.
		 */
		if (pp->mediasize == 0)
			pp->mediasize = dp->d_mediasize;
		else
			g_resize_provider(pp, dp->d_mediasize);
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		if (dp->d_close != NULL) {
			error = dp->d_close(dp);
			if (error != 0)
				printf("Closed disk %s -> %d\n",
				    pp->name, error);
		}
		sc->state = G_STATE_ACTIVE;
		if (sc->led[0] != 0)
			led_set(sc->led, "0");
		dp->d_flags &= ~DISKFLAG_OPEN;
	}
	return (error);
}

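/*
 * Handle the "GEOM::kerneldump" attribute: fill in the dump descriptor
 * with the driver's d_dump routine and the disk parameters, clipping the
 * requested range to the media size.
 */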
static void
g_disk_kerneldump(struct bio *bp, struct disk *dp)
{
	struct g_kerneldump *gkd;
	struct g_geom *gp;

	gkd = (struct g_kerneldump*)bp->bio_data;
	gp = bp->bio_to->geom;
	g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)",
		gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	if (dp->d_dump == NULL) {
		g_io_deliver(bp, ENODEV);
		return;
	}
	gkd->di.dumper = dp->d_dump;
	gkd->di.priv = dp;
	gkd->di.blocksize = dp->d_sectorsize;
	gkd->di.maxiosize = dp->d_maxsize;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > dp->d_mediasize)
		gkd->length = dp->d_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}

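/*
 * Handle the "GEOM::setstate" attribute: record the new state and, if an
 * LED name has been configured via sysctl, set the LED pattern to match
 * (failed, rebuild, resync or normal).
 */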
static void
g_disk_setstate(struct bio *bp, struct g_disk_softc *sc)
{
	const char *cmd;

	memcpy(&sc->state, bp->bio_data, sizeof(sc->state));
	if (sc->led[0] != 0) {
		switch (sc->state) {
		case G_STATE_FAILED:
			cmd = "1";
			break;
		case G_STATE_REBUILD:
			cmd = "f5";
			break;
		case G_STATE_RESYNC:
			cmd = "f1";
			break;
		default:
			cmd = "0";
			break;
		}
		led_set(sc->led, cmd);
	}
	g_io_deliver(bp, 0);
}

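/*
 * Completion handler for bios cloned in g_disk_start().  Under done_mtx,
 * fold the error and completed byte count into the parent bio, end the
 * devstat transaction, and deliver the parent once all of its children
 * have come back.
 */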
static void
g_disk_done(struct bio *bp)
{
	struct bintime now;
	struct bio *bp2;
	struct g_disk_softc *sc;

	/* See "notes" for why we need a mutex here */
	sc = bp->bio_caller1;
	bp2 = bp->bio_parent;
	binuptime(&now);
	mtx_lock(&sc->done_mtx);
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_length - bp->bio_resid;

	switch (bp->bio_cmd) {
	case BIO_ZONE:
		bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone));
		/*FALLTHROUGH*/
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		devstat_end_transaction_bio_bt(sc->d_devstat, bp, &now);
		break;
	default:
		break;
	}
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed) {
		mtx_unlock(&sc->done_mtx);
		bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed;
		g_io_deliver(bp2, bp2->bio_error);
	} else
		mtx_unlock(&sc->done_mtx);
	g_destroy_bio(bp);
}

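/*
 * Pass ioctls through to the driver's d_ioctl method, if it provides one.
 */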
static int
g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct thread *td)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = pp->private;
	dp = sc->dp;
	KASSERT(dp != NULL && !dp->d_destroyed,
	    ("g_disk_ioctl(%lx) on destroyed disk %s", cmd, pp->name));

	if (dp->d_ioctl == NULL)
		return (ENOIOCTL);
	return (dp->d_ioctl(dp, cmd, data, fflag, td));
}

static off_t
g_disk_maxsize(struct disk *dp, struct bio *bp)
{
	if (bp->bio_cmd == BIO_DELETE)
		return (dp->d_delmaxsize);
	return (dp->d_maxsize);
}

static int
g_disk_maxsegs(struct disk *dp, struct bio *bp)
{
	return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
}

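/*
 * Advance a cloned bio by 'off' bytes, adjusting the offset, length and
 * data pointers for the vlist, unmapped and plain pointer cases.
 */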
static void
g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
{

	bp->bio_offset += off;
	bp->bio_length -= off;

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *seg, *end;

		seg = (bus_dma_segment_t *)bp->bio_data;
		end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
		off += bp->bio_ma_offset;
		while (off >= seg->ds_len) {
			KASSERT((seg != end),
			    ("vlist request runs off the end"));
			off -= seg->ds_len;
			seg++;
		}
		bp->bio_ma_offset = off;
		bp->bio_ma_n = end - seg;
		bp->bio_data = (void *)seg;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma += off / PAGE_SIZE;
		bp->bio_ma_offset += off;
		bp->bio_ma_offset %= PAGE_SIZE;
		bp->bio_ma_n -= off / PAGE_SIZE;
	} else {
		bp->bio_data += off;
	}
}

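/*
 * Clip a transfer against a single vlist segment, consuming the starting
 * offset and decrementing the remaining length and page budget.
 */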
static void
g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
    off_t *plength, int *ppages)
{
	uintptr_t seg_page_base;
	uintptr_t seg_page_end;
	off_t offset;
	off_t length;
	int seg_pages;

	offset = *poffset;
	length = *plength;

	if (length > seg->ds_len - offset)
		length = seg->ds_len - offset;

	seg_page_base = trunc_page(seg->ds_addr + offset);
	seg_page_end  = round_page(seg->ds_addr + offset + length);
	seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;

	if (seg_pages > *ppages) {
		seg_pages = *ppages;
		length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
		    (seg->ds_addr + offset);
	}

	*poffset = 0;
	*plength -= length;
	*ppages -= seg_pages;
}

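/*
 * Walk a bio's vlist and return how many bytes fall beyond the segment
 * budget allowed for this disk, optionally reporting where the walk
 * stopped so the caller can trim bio_ma_n.
 */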
static off_t
g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
{
	bus_dma_segment_t *seg, *end;
	off_t residual;
	off_t offset;
	int pages;

	seg = (bus_dma_segment_t *)bp->bio_data;
	end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
	residual = bp->bio_length;
	offset = bp->bio_ma_offset;
	pages = g_disk_maxsegs(dp, bp);
	while (residual != 0 && pages != 0) {
		KASSERT((seg != end),
		    ("vlist limit runs off the end"));
		g_disk_seg_limit(seg, &offset, &residual, &pages);
		seg++;
	}
	if (pendseg != NULL)
		*pendseg = seg;
	return (residual);
}

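/*
 * Trim a cloned bio so that it fits within the disk's maximum transfer
 * size (and segment count for vlist requests).  Returns true if the bio
 * was shortened and the remainder must be issued separately.
 */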
static bool
g_disk_limit(struct disk *dp, struct bio *bp)
{
	bool limited = false;
	off_t maxsz;

	maxsz = g_disk_maxsize(dp, bp);

	/*
	 * XXX: If we have a stripesize we should really use it here.
	 *      Care should be taken in the delete case if this is done
	 *      as deletes can be very sensitive to size given how they
	 *      are processed.
	 */
	if (bp->bio_length > maxsz) {
		bp->bio_length = maxsz;
		limited = true;
	}

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *firstseg, *endseg;
		off_t residual;

		firstseg = (bus_dma_segment_t*)bp->bio_data;
		residual = g_disk_vlist_limit(dp, bp, &endseg);
		if (residual != 0) {
			bp->bio_ma_n = endseg - firstseg;
			bp->bio_length -= residual;
			limited = true;
		}
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma_n =
		    howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
	}

	return (limited);
}

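/*
 * Main request dispatcher for the class.  Read, write and delete bios are
 * cloned, split as needed by g_disk_limit() and handed to the driver's
 * d_strategy routine; attribute, flush, zone and speedup requests are
 * answered here or forwarded to the driver as appropriate.
 */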
static void
g_disk_start(struct bio *bp)
{
	struct bio *bp2, *bp3;
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;
	off_t off;

	biotrack(bp, __func__);

	sc = bp->bio_to->private;
	dp = sc->dp;
	KASSERT(dp != NULL && !dp->d_destroyed,
	    ("g_disk_start(%p) on destroyed disk %s", bp, bp->bio_to->name));
	error = EJUSTRETURN;
	switch(bp->bio_cmd) {
	case BIO_DELETE:
		if (!(dp->d_flags & DISKFLAG_CANDELETE)) {
			error = EOPNOTSUPP;
			break;
		}
		/* fall-through */
	case BIO_READ:
	case BIO_WRITE:
		KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
		    (bp->bio_flags & BIO_UNMAPPED) == 0,
		    ("unmapped bio not supported by disk %s", dp->d_name));
		off = 0;
		bp3 = NULL;
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			error = ENOMEM;
			break;
		}
		for (;;) {
			if (g_disk_limit(dp, bp2)) {
				off += bp2->bio_length;

				/*
				 * To avoid a race, we need to grab the next bio
				 * before we schedule this one.  See "notes".
				 */
				bp3 = g_clone_bio(bp);
				if (bp3 == NULL)
					bp->bio_error = ENOMEM;
			}
			bp2->bio_done = g_disk_done;
			bp2->bio_caller1 = sc;
			bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
			bp2->bio_bcount = bp2->bio_length;
			bp2->bio_disk = dp;
			devstat_start_transaction_bio(dp->d_devstat, bp2);
			dp->d_strategy(bp2);

			if (bp3 == NULL)
				break;

			bp2 = bp3;
			bp3 = NULL;
			g_disk_advance(dp, bp2, off);
		}
		break;
	case BIO_GETATTR:
		/* Give the driver a chance to override */
		if (dp->d_getattr != NULL) {
			if (bp->bio_disk == NULL)
				bp->bio_disk = dp;
			error = dp->d_getattr(bp);
			if (error != -1)
				break;
			error = EJUSTRETURN;
		}
		if (g_handleattr_int(bp, "GEOM::candelete",
		    (dp->d_flags & DISKFLAG_CANDELETE) != 0))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwsectors",
		    dp->d_fwsectors))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads))
			break;
		else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0))
			break;
		else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident))
			break;
		else if (g_handleattr_str(bp, "GEOM::descr", dp->d_descr))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor",
		    dp->d_hba_vendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_device",
		    dp->d_hba_device))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor",
		    dp->d_hba_subvendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice",
		    dp->d_hba_subdevice))
			break;
		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_disk_kerneldump(bp, dp);
		else if (!strcmp(bp->bio_attribute, "GEOM::setstate"))
			g_disk_setstate(bp, sc);
		else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate",
		    dp->d_rotation_rate))
			break;
		else if (g_handleattr_str(bp, "GEOM::attachment",
		    dp->d_attachment))
			break;
		else
			error = ENOIOCTL;
		break;
	case BIO_FLUSH:
		g_trace(G_T_BIO, "g_disk_flushcache(%s)",
		    bp->bio_to->name);
		if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) {
			error = EOPNOTSUPP;
			break;
		}
		/*FALLTHROUGH*/
	case BIO_ZONE:
		if (bp->bio_cmd == BIO_ZONE) {
			if (!(dp->d_flags & DISKFLAG_CANZONE)) {
				error = EOPNOTSUPP;
				break;
			}
			g_trace(G_T_BIO, "g_disk_zone(%s)",
			    bp->bio_to->name);
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_caller1 = sc;
		bp2->bio_disk = dp;
		devstat_start_transaction_bio(dp->d_devstat, bp2);
		dp->d_strategy(bp2);
		break;
	case BIO_SPEEDUP:
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_caller1 = sc;
		bp2->bio_disk = dp;
		dp->d_strategy(bp2);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	if (error != EJUSTRETURN)
		g_io_deliver(bp, error);
	return;
}

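/*
 * Dump this disk's configuration, either as a terse one-line summary or
 * as XML for the provider: firmware geometry, rotation rate, and the
 * ident/lunid/lunname strings obtained through d_getattr() when the
 * driver supplies that method.
 */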
static void
g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp)
{
	struct bio *bp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char *buf;
	int res = 0;

	sc = gp->softc;
	if (sc == NULL || (dp = sc->dp) == NULL)
		return;
	if (indent == NULL) {
		sbuf_printf(sb, " hd %u", dp->d_fwheads);
		sbuf_printf(sb, " sc %u", dp->d_fwsectors);
		return;
	}
	if (pp != NULL) {
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n",
		    indent, dp->d_fwheads);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n",
		    indent, dp->d_fwsectors);

		/*
		 * "rotationrate" is a little complicated, because the value
		 * returned by the drive might not be the RPM; 0 and 1 are
		 * special cases, and there's also a valid range.
		 */
		sbuf_printf(sb, "%s<rotationrate>", indent);
		if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */
			sbuf_cat(sb, "unknown");	/* don't report RPM. */
		else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING)
			sbuf_cat(sb, "0");
		else if ((dp->d_rotation_rate >= DISK_RR_MIN) &&
		    (dp->d_rotation_rate <= DISK_RR_MAX))
			sbuf_printf(sb, "%u", dp->d_rotation_rate);
		else
			sbuf_cat(sb, "invalid");
		sbuf_cat(sb, "</rotationrate>\n");
		if (dp->d_getattr != NULL) {
			buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK);
			bp = g_alloc_bio();
			bp->bio_disk = dp;
			bp->bio_attribute = "GEOM::ident";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			res = dp->d_getattr(bp);
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_cat_escaped(sb, res == 0 ? buf : dp->d_ident);
			sbuf_cat(sb, "</ident>\n");
			bp->bio_attribute = "GEOM::lunid";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunid>", indent);
				g_conf_cat_escaped(sb, buf);
				sbuf_cat(sb, "</lunid>\n");
			}
			bp->bio_attribute = "GEOM::lunname";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunname>", indent);
				g_conf_cat_escaped(sb, buf);
				sbuf_cat(sb, "</lunname>\n");
			}
			g_destroy_bio(bp);
			g_free(buf);
		} else {
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_cat_escaped(sb, dp->d_ident);
			sbuf_cat(sb, "</ident>\n");
		}
		sbuf_printf(sb, "%s<descr>", indent);
		g_conf_cat_escaped(sb, dp->d_descr);
		sbuf_cat(sb, "</descr>\n");
	}
}

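/*
 * Event handler run when a driver reports a new media size: providers
 * whose sector size no longer matches are withered, the rest are resized.
 */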
static void
g_disk_resize(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_provider *pp;

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();

	dp = ptr;
	gp = dp->d_geom;

	if (dp->d_destroyed || gp == NULL)
		return;

	LIST_FOREACH(pp, &gp->provider, provider) {
		if (pp->sectorsize != 0 &&
		    pp->sectorsize != dp->d_sectorsize)
			g_wither_provider(pp, ENXIO);
		else
			g_resize_provider(pp, dp->d_mediasize);
	}
}

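/*
 * Event handler that instantiates the geom and provider for a new disk(9)
 * device: allocate the softc, create the provider and its aliases, hook
 * up devstat and the per-disk sysctl tree, and finally check whether the
 * disk went away while the event was queued.
 */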
static void
g_disk_create(void *arg, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct disk *dp;
	struct g_disk_softc *sc;
	struct disk_alias *dap;
	char tmpstr[80];

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();
	dp = arg;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_START;

	/*
	 * If the disk has already gone away, we can just stop here and
	 * call the user's callback to tell them we've cleaned things up.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		if (dp->d_gone != NULL)
			dp->d_gone(dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
	sc->dp = dp;
	sc->d_devstat = dp->d_devstat;
	gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
	gp->softc = sc;
	LIST_FOREACH(dap, &dp->d_aliases, da_next) {
		snprintf(tmpstr, sizeof(tmpstr), "%s%d", dap->da_alias, dp->d_unit);
		g_geom_add_alias(gp, tmpstr);
	}
	pp = g_new_providerf(gp, "%s", gp->name);
	devstat_remove_entry(pp->stat);
	pp->stat = NULL;
	dp->d_devstat->id = pp;
	pp->mediasize = dp->d_mediasize;
	pp->sectorsize = dp->d_sectorsize;
	pp->stripeoffset = dp->d_stripeoffset;
	pp->stripesize = dp->d_stripesize;
	if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
	if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0)
		pp->flags |= G_PF_DIRECT_SEND;
	pp->flags |= G_PF_DIRECT_RECEIVE;
	if (bootverbose)
		printf("GEOM: new disk %s\n", gp->name);
	sysctl_ctx_init(&sc->sysctl_ctx);
	snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name,
		CTLFLAG_RD, 0, tmpstr);
	if (sc->sysctl_tree != NULL) {
		SYSCTL_ADD_STRING(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led",
		    CTLFLAG_RWTUN, sc->led, sizeof(sc->led),
		    "LED name");
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "flags",
		    CTLTYPE_STRING | CTLFLAG_RD, dp, 0, g_disk_sysctl_flags,
		    "A", "Report disk flags");
	}
	pp->private = sc;
	dp->d_geom = gp;
	g_error_provider(pp, 0);

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_DONE;

	/*
	 * If the disk has gone away at this stage, start the withering
	 * process for it.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		g_wither_provider(pp, ENXIO);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

}

/*
 * We get this callback after all of the consumers have gone away, and just
 * before the provider is freed.  If the disk driver provided a d_gone
 * callback, let them know that it is okay to free resources -- they won't
 * be getting any more accesses from GEOM.
 */
static void
g_disk_providergone(struct g_provider *pp)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = (struct g_disk_softc *)pp->private;
	dp = sc->dp;
	if (dp != NULL && dp->d_gone != NULL)
		dp->d_gone(dp);
	if (sc->sysctl_tree != NULL) {
		sysctl_ctx_free(&sc->sysctl_ctx);
		sc->sysctl_tree = NULL;
	}
	if (sc->led[0] != 0) {
		led_set(sc->led, "0");
		sc->led[0] = 0;
	}
	pp->private = NULL;
	pp->geom->softc = NULL;
	mtx_destroy(&sc->done_mtx);
	g_free(sc);
}

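/*
 * Event handler for disk_destroy(): detach the disk from its geom, wither
 * the geom, and free the aliases and the disk structure itself.
 */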
static void
g_disk_destroy(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_disk_softc *sc;
	struct disk_alias *dap, *daptmp;

	g_topology_assert();
	dp = ptr;
	gp = dp->d_geom;
	if (gp != NULL) {
		sc = gp->softc;
		if (sc != NULL)
			sc->dp = NULL;
		dp->d_geom = NULL;
		g_wither_geom(gp, ENXIO);
	}
	LIST_FOREACH_SAFE(dap, &dp->d_aliases, da_next, daptmp)
		g_free(dap);

	g_free(dp);
}

/*
 * We only allow printable characters in disk ident,
 * the rest is converted to 'x<HH>'.
 */
static void
g_disk_ident_adjust(char *ident, size_t size)
{
	char *p, tmp[4], newid[DISK_IDENT_SIZE];

	newid[0] = '\0';
	for (p = ident; *p != '\0'; p++) {
		if (isprint(*p)) {
			tmp[0] = *p;
			tmp[1] = '\0';
		} else {
			snprintf(tmp, sizeof(tmp), "x%02hhx",
			    *(unsigned char *)p);
		}
		if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid))
			break;
	}
	bzero(ident, size);
	strlcpy(ident, newid, size);
}

struct disk *
disk_alloc(void)
{
	struct disk *dp;

	dp = g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO);
	LIST_INIT(&dp->d_aliases);
	return (dp);
}

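/*
 * Driver entry point: validate the disk(9) ABI version and the required
 * methods, set up devstat, sanitize the ident string and queue a
 * g_disk_create() event to build the geom.  A typical driver attach does
 * roughly the following (sketch only; the "foo" names are illustrative,
 * not part of this file):
 *
 *	struct disk *dp;
 *
 *	dp = disk_alloc();
 *	dp->d_name = "foo";
 *	dp->d_unit = unit;
 *	dp->d_strategy = foo_strategy;
 *	dp->d_sectorsize = 512;
 *	dp->d_mediasize = mediasize;
 *	dp->d_maxsize = maxio;
 *	disk_create(dp, DISK_VERSION);
 */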
void
disk_create(struct disk *dp, int version)
{

	if (version != DISK_VERSION) {
		printf("WARNING: Attempt to add disk %s%d %s",
		    dp->d_name, dp->d_unit,
		    " using incompatible ABI version of disk(9)\n");
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	if (dp->d_flags & DISKFLAG_RESERVED) {
		printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n",
		    dp->d_name, dp->d_unit);
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy"));
	KASSERT(dp->d_name != NULL, ("disk_create need d_name"));
	KASSERT(*dp->d_name != 0, ("disk_create need d_name"));
	KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long"));
	if (dp->d_devstat == NULL)
		dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit,
		    dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
		    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	dp->d_geom = NULL;

	dp->d_init_level = DISK_INIT_NONE;

	g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident));
	g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL);
}

void
disk_destroy(struct disk *dp)
{

	disk_gone(dp);
	dp->d_destroyed = 1;
	g_cancel_event(dp);
	if (dp->d_devstat != NULL)
		devstat_remove_entry(dp->d_devstat);
	g_post_event(g_disk_destroy, dp, M_WAITOK, NULL);
}

void
disk_add_alias(struct disk *dp, const char *name)
{
	struct disk_alias *dap;

	dap = (struct disk_alias *)g_malloc(
		sizeof(struct disk_alias) + strlen(name) + 1, M_WAITOK);
	strcpy((char *)(dap + 1), name);
	dap->da_alias = (const char *)(dap + 1);
	LIST_INSERT_HEAD(&dp->d_aliases, dap, da_next);
}

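/*
 * Note that the disk is going away.  If creation has not completed yet,
 * the pending g_disk_create() event will clean up; otherwise wither the
 * provider so consumers start to detach.
 */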
void
disk_gone(struct disk *dp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	mtx_pool_lock(mtxpool_sleep, dp);

	/*
	 * A second wither call makes no sense, plus we cannot access the list
	 * of providers without the topology lock after calling wither once.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		return;
	}

	dp->d_goneflag = 1;

	/*
	 * If we're still in the process of creating this disk (the
	 * g_disk_create() function is still queued, or is in
	 * progress), the init level will not yet be DISK_INIT_DONE.
	 *
	 * If that is the case, g_disk_create() will see d_goneflag
	 * and take care of cleaning things up.
	 *
	 * If the disk has already been created, we default to
	 * withering the provider as usual below.
	 *
	 * If the caller has not set a d_gone() callback, they will
	 * not be any worse off by returning here, because the geom
	 * has not been fully set up in any case.
	 */
	if (dp->d_init_level < DISK_INIT_DONE) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	gp = dp->d_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		KASSERT(LIST_NEXT(pp, provider) == NULL,
		    ("geom %p has more than one provider", gp));
		g_wither_provider(pp, ENXIO);
	}
}

void
disk_attr_changed(struct disk *dp, const char *attr, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char devnamebuf[128];

	gp = dp->d_geom;
	if (gp != NULL)
		LIST_FOREACH(pp, &gp->provider, provider)
			(void)g_attr_changed(pp, attr, flag);
	snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name,
	    dp->d_unit);
	devctl_notify("GEOM", "disk", attr, devnamebuf);
}

void
disk_media_changed(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_changed(pp, flag);
		}
	}
}

void
disk_media_gone(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_gone(pp, flag);
		}
	}
}

int
disk_resize(struct disk *dp, int flag)
{

	if (dp->d_destroyed || dp->d_geom == NULL)
		return (0);

	return (g_post_event(g_disk_resize, dp, flag, NULL));
}

static void
g_kern_disks(void *p, int flag __unused)
{
	struct sbuf *sb;
	struct g_geom *gp;
	char *sp;

	sb = p;
	sp = "";
	g_topology_assert();
	LIST_FOREACH(gp, &g_disk_class.geom, geom) {
		sbuf_printf(sb, "%s%s", sp, gp->name);
		sp = " ";
	}
	sbuf_finish(sb);
}

static int
g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS)
{
	struct disk *dp;
	struct sbuf *sb;
	int error;

	sb = sbuf_new_auto();
	dp = (struct disk *)arg1;
	sbuf_printf(sb, "%b", dp->d_flags,
		"\20"
		"\2OPEN"
		"\3CANDELETE"
		"\4CANFLUSHCACHE"
		"\5UNMAPPEDBIO"
		"\6DIRECTCOMPLETION"
		"\10CANZONE"
		"\11WRITEPROTECT");

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

static int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct sbuf *sb;

	sb = sbuf_new_auto();
	g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, disks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_disks, "A", "names of available disks");