/* xref: /freebsd/sys/geom/geom_disk.c (revision 792bbaba989533a1fc93823df1720c8c4aaf0442) */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_geom.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/ctype.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/devicestat.h>
#include <machine/md_var.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#include <dev/led/led.h>

#include <machine/bus.h>

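/*
 * Per-disk state private to this class: the backing struct disk, the
 * per-disk sysctl context/tree, the led(4) device name and state used
 * by GEOM::setstate, and two mutexes that serialize devstat start and
 * completion accounting.
 */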
struct g_disk_softc {
	struct mtx		 done_mtx;
	struct disk		*dp;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	char			led[64];
	uint32_t		state;
	struct mtx		 start_mtx;
};

static g_access_t g_disk_access;
static g_start_t g_disk_start;
static g_ioctl_t g_disk_ioctl;
static g_dumpconf_t g_disk_dumpconf;
static g_provgone_t g_disk_providergone;

static struct g_class g_disk_class = {
	.name = G_DISK_CLASS_NAME,
	.version = G_VERSION,
	.start = g_disk_start,
	.access = g_disk_access,
	.ioctl = g_disk_ioctl,
	.providergone = g_disk_providergone,
	.dumpconf = g_disk_dumpconf,
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0,
    "GEOM_DISK stuff");

DECLARE_GEOM_CLASS(g_disk_class, g_disk);

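/*
 * Access method: translate GEOM's read/write/exclusive reference count
 * deltas into disk(9) calls.  The disk is opened via d_open() on the
 * first reference and closed via d_close() when the last reference is
 * dropped; transfer limits and media/sector sizes are (re)loaded from
 * the struct disk on open.
 */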
static int
g_disk_access(struct g_provider *pp, int r, int w, int e)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)",
	    pp->name, r, w, e);
	g_topology_assert();
	sc = pp->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		/*
		 * Allow decreasing access count even if disk is not
		 * available anymore.
		 */
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	error = 0;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		if (dp->d_open != NULL) {
			error = dp->d_open(dp);
			if (bootverbose && error != 0)
				printf("Opened disk %s -> %d\n",
				    pp->name, error);
			if (error != 0)
				return (error);
		}
		pp->sectorsize = dp->d_sectorsize;
		if (dp->d_maxsize == 0) {
			printf("WARNING: Disk drive %s%d has no d_maxsize\n",
			    dp->d_name, dp->d_unit);
			dp->d_maxsize = DFLTPHYS;
		}
		if (dp->d_delmaxsize == 0) {
			if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) {
				printf("WARNING: Disk drive %s%d has no "
				    "d_delmaxsize\n", dp->d_name, dp->d_unit);
			}
			dp->d_delmaxsize = dp->d_maxsize;
		}
		pp->stripeoffset = dp->d_stripeoffset;
		pp->stripesize = dp->d_stripesize;
		dp->d_flags |= DISKFLAG_OPEN;
		/*
		 * Do not invoke a resize event when the initial size was
		 * zero.  Some disks report their size only after the first
		 * open.
		 */
		if (pp->mediasize == 0)
			pp->mediasize = dp->d_mediasize;
		else
			g_resize_provider(pp, dp->d_mediasize);
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		if (dp->d_close != NULL) {
			error = dp->d_close(dp);
			if (error != 0)
				printf("Closed disk %s -> %d\n",
				    pp->name, error);
		}
		sc->state = G_STATE_ACTIVE;
		if (sc->led[0] != 0)
			led_set(sc->led, "0");
		dp->d_flags &= ~DISKFLAG_OPEN;
	}
	return (error);
}

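/*
 * Handle the "GEOM::kerneldump" attribute: fill in the dumper info from
 * the disk's d_dump method and clip the requested range so that it does
 * not extend past the end of the media.
 */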
static void
g_disk_kerneldump(struct bio *bp, struct disk *dp)
{
	struct g_kerneldump *gkd;
	struct g_geom *gp;

	gkd = (struct g_kerneldump*)bp->bio_data;
	gp = bp->bio_to->geom;
	g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)",
		gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	if (dp->d_dump == NULL) {
		g_io_deliver(bp, ENODEV);
		return;
	}
	gkd->di.dumper = dp->d_dump;
	gkd->di.priv = dp;
	gkd->di.blocksize = dp->d_sectorsize;
	gkd->di.maxiosize = dp->d_maxsize;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > dp->d_mediasize)
		gkd->length = dp->d_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}

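/*
 * Handle the "GEOM::setstate" attribute: record the new state and, if
 * an LED has been configured through the per-disk sysctl, translate the
 * state into an led(4) pattern (solid on for FAILED, flash patterns for
 * REBUILD/RESYNC, off otherwise).
 */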
static void
g_disk_setstate(struct bio *bp, struct g_disk_softc *sc)
{
	const char *cmd;

	memcpy(&sc->state, bp->bio_data, sizeof(sc->state));
	if (sc->led[0] != 0) {
		switch (sc->state) {
		case G_STATE_FAILED:
			cmd = "1";
			break;
		case G_STATE_REBUILD:
			cmd = "f5";
			break;
		case G_STATE_RESYNC:
			cmd = "f1";
			break;
		default:
			cmd = "0";
			break;
		}
		led_set(sc->led, cmd);
	}
	g_io_deliver(bp, 0);
}

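/*
 * Completion path.  A single upstream bio may have been split into
 * several clones by g_disk_start(), so the per-disk done_mtx serializes
 * the error and byte-count accounting; the parent is only delivered
 * once all of its children have come back.
 */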
static void
g_disk_done(struct bio *bp)
{
	struct bintime now;
	struct bio *bp2;
	struct g_disk_softc *sc;

	/* See "notes" for why we need a mutex here */
	/* XXX: will witness accept a mix of Giant/unGiant drivers here ? */
	bp2 = bp->bio_parent;
	sc = bp2->bio_to->private;
	bp->bio_completed = bp->bio_length - bp->bio_resid;
	binuptime(&now);
	mtx_lock(&sc->done_mtx);
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_completed;

	switch (bp->bio_cmd) {
	case BIO_ZONE:
		bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone));
		/*FALLTHROUGH*/
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		devstat_end_transaction_bio_bt(sc->dp->d_devstat, bp, &now);
		break;
	default:
		break;
	}
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed) {
		mtx_unlock(&sc->done_mtx);
		bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed;
		g_io_deliver(bp2, bp2->bio_error);
	} else
		mtx_unlock(&sc->done_mtx);
	g_destroy_bio(bp);
}

static int
g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct thread *td)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	sc = pp->private;
	dp = sc->dp;

	if (dp->d_ioctl == NULL)
		return (ENOIOCTL);
	error = dp->d_ioctl(dp, cmd, data, fflag, td);
	return (error);
}

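/*
 * Transfer-size helpers: BIO_DELETE requests are bounded by
 * d_delmaxsize, everything else by d_maxsize.  g_disk_maxsegs() gives
 * the corresponding upper bound on the number of physical pages a
 * single request may touch.
 */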
static off_t
g_disk_maxsize(struct disk *dp, struct bio *bp)
{
	if (bp->bio_cmd == BIO_DELETE)
		return (dp->d_delmaxsize);
	return (dp->d_maxsize);
}

static int
g_disk_maxsegs(struct disk *dp, struct bio *bp)
{
	return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
}

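/*
 * Advance a cloned bio by 'off' bytes, for the case where the original
 * request had to be split.  Each of the three data representations is
 * handled: scatter/gather vlists, unmapped (page array) bios and plain
 * mapped buffers.
 */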
static void
g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
{

	bp->bio_offset += off;
	bp->bio_length -= off;

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *seg, *end;

		seg = (bus_dma_segment_t *)bp->bio_data;
		end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
		off += bp->bio_ma_offset;
		while (off >= seg->ds_len) {
			KASSERT((seg != end),
			    ("vlist request runs off the end"));
			off -= seg->ds_len;
			seg++;
		}
		bp->bio_ma_offset = off;
		bp->bio_ma_n = end - seg;
		bp->bio_data = (void *)seg;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma += off / PAGE_SIZE;
		bp->bio_ma_offset += off;
		bp->bio_ma_offset %= PAGE_SIZE;
		bp->bio_ma_n -= off / PAGE_SIZE;
	} else {
		bp->bio_data += off;
	}
}

static void
g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
    off_t *plength, int *ppages)
{
	uintptr_t seg_page_base;
	uintptr_t seg_page_end;
	off_t offset;
	off_t length;
	int seg_pages;

	offset = *poffset;
	length = *plength;

	if (length > seg->ds_len - offset)
		length = seg->ds_len - offset;

	seg_page_base = trunc_page(seg->ds_addr + offset);
	seg_page_end  = round_page(seg->ds_addr + offset + length);
	seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;

	if (seg_pages > *ppages) {
		seg_pages = *ppages;
		length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
		    (seg->ds_addr + offset);
	}

	*poffset = 0;
	*plength -= length;
	*ppages -= seg_pages;
}

static off_t
g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
{
	bus_dma_segment_t *seg, *end;
	off_t residual;
	off_t offset;
	int pages;

	seg = (bus_dma_segment_t *)bp->bio_data;
	end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
	residual = bp->bio_length;
	offset = bp->bio_ma_offset;
	pages = g_disk_maxsegs(dp, bp);
	while (residual != 0 && pages != 0) {
		KASSERT((seg != end),
		    ("vlist limit runs off the end"));
		g_disk_seg_limit(seg, &offset, &residual, &pages);
		seg++;
	}
	if (pendseg != NULL)
		*pendseg = seg;
	return (residual);
}

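/*
 * Clip a cloned bio to the disk's transfer limits.  Returns true if the
 * request had to be shortened, in which case the caller issues the
 * remainder as additional clones.
 */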
static bool
g_disk_limit(struct disk *dp, struct bio *bp)
{
	bool limited = false;
	off_t maxsz;

	maxsz = g_disk_maxsize(dp, bp);

	/*
	 * XXX: If we have a stripesize we should really use it here.
	 *      Care should be taken in the delete case if this is done
	 *      as deletes can be very sensitive to size given how they
	 *      are processed.
	 */
	if (bp->bio_length > maxsz) {
		bp->bio_length = maxsz;
		limited = true;
	}

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *firstseg, *endseg;
		off_t residual;

		firstseg = (bus_dma_segment_t*)bp->bio_data;
		residual = g_disk_vlist_limit(dp, bp, &endseg);
		if (residual != 0) {
			bp->bio_ma_n = endseg - firstseg;
			bp->bio_length -= residual;
			limited = true;
		}
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma_n =
		    howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
	}

	return (limited);
}

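/*
 * Start method.  READ/WRITE/DELETE requests are cloned, clipped to the
 * disk's transfer limits and handed to d_strategy(); requests larger
 * than the limit become a chain of clones that are completed back
 * through g_disk_done().  GETATTR, FLUSH and ZONE are handled inline.
 */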
static void
g_disk_start(struct bio *bp)
{
	struct bio *bp2, *bp3;
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;
	off_t off;

	biotrack(bp, __func__);

	sc = bp->bio_to->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	error = EJUSTRETURN;
	switch(bp->bio_cmd) {
	case BIO_DELETE:
		if (!(dp->d_flags & DISKFLAG_CANDELETE)) {
			error = EOPNOTSUPP;
			break;
		}
		/* fall-through */
	case BIO_READ:
	case BIO_WRITE:
		KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
		    (bp->bio_flags & BIO_UNMAPPED) == 0,
		    ("unmapped bio not supported by disk %s", dp->d_name));
		off = 0;
		bp3 = NULL;
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			error = ENOMEM;
			break;
		}
		for (;;) {
			if (g_disk_limit(dp, bp2)) {
				off += bp2->bio_length;

				/*
				 * To avoid a race, we need to grab the next bio
				 * before we schedule this one.  See "notes".
				 */
				bp3 = g_clone_bio(bp);
				if (bp3 == NULL)
					bp->bio_error = ENOMEM;
			}
			bp2->bio_done = g_disk_done;
			bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
			bp2->bio_bcount = bp2->bio_length;
			bp2->bio_disk = dp;
			mtx_lock(&sc->start_mtx);
			devstat_start_transaction_bio(dp->d_devstat, bp2);
			mtx_unlock(&sc->start_mtx);
			dp->d_strategy(bp2);

			if (bp3 == NULL)
				break;

			bp2 = bp3;
			bp3 = NULL;
			g_disk_advance(dp, bp2, off);
		}
		break;
	case BIO_GETATTR:
		/* Give the driver a chance to override */
		if (dp->d_getattr != NULL) {
			if (bp->bio_disk == NULL)
				bp->bio_disk = dp;
			error = dp->d_getattr(bp);
			if (error != -1)
				break;
			error = EJUSTRETURN;
		}
		if (g_handleattr_int(bp, "GEOM::candelete",
		    (dp->d_flags & DISKFLAG_CANDELETE) != 0))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwsectors",
		    dp->d_fwsectors))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads))
			break;
		else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0))
			break;
		else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor",
		    dp->d_hba_vendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_device",
		    dp->d_hba_device))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor",
		    dp->d_hba_subvendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice",
		    dp->d_hba_subdevice))
			break;
		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_disk_kerneldump(bp, dp);
		else if (!strcmp(bp->bio_attribute, "GEOM::setstate"))
			g_disk_setstate(bp, sc);
		else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate",
		    dp->d_rotation_rate))
			break;
		else
			error = ENOIOCTL;
		break;
	case BIO_FLUSH:
		g_trace(G_T_BIO, "g_disk_flushcache(%s)",
		    bp->bio_to->name);
		if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) {
			error = EOPNOTSUPP;
			break;
		}
		/*FALLTHROUGH*/
	case BIO_ZONE:
		if (bp->bio_cmd == BIO_ZONE) {
			if (!(dp->d_flags & DISKFLAG_CANZONE)) {
				error = EOPNOTSUPP;
				break;
			}
			g_trace(G_T_BIO, "g_disk_zone(%s)",
			    bp->bio_to->name);
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_disk = dp;
		mtx_lock(&sc->start_mtx);
		devstat_start_transaction_bio(dp->d_devstat, bp2);
		mtx_unlock(&sc->start_mtx);
		dp->d_strategy(bp2);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	if (error != EJUSTRETURN)
		g_io_deliver(bp, error);
	return;
}

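/*
 * dumpconf method: emit the firmware geometry, rotation rate, identity
 * and description of the disk, either in the terse one-line form
 * (indent == NULL) or as XML elements for the GEOM confxml tree.
 */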
static void
g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp)
{
	struct bio *bp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char *buf;
	int res = 0;

	sc = gp->softc;
	if (sc == NULL || (dp = sc->dp) == NULL)
		return;
	if (indent == NULL) {
		sbuf_printf(sb, " hd %u", dp->d_fwheads);
		sbuf_printf(sb, " sc %u", dp->d_fwsectors);
		return;
	}
	if (pp != NULL) {
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n",
		    indent, dp->d_fwheads);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n",
		    indent, dp->d_fwsectors);

		/*
		 * "rotationrate" is a little complicated, because the value
		 * returned by the drive might not be the RPM; 0 and 1 are
		 * special cases, and there's also a valid range.
		 */
		sbuf_printf(sb, "%s<rotationrate>", indent);
		if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */
			sbuf_printf(sb, "unknown");	/* don't report RPM. */
		else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING)
			sbuf_printf(sb, "0");
		else if ((dp->d_rotation_rate >= DISK_RR_MIN) &&
		    (dp->d_rotation_rate <= DISK_RR_MAX))
			sbuf_printf(sb, "%u", dp->d_rotation_rate);
		else
			sbuf_printf(sb, "invalid");
		sbuf_printf(sb, "</rotationrate>\n");
		if (dp->d_getattr != NULL) {
			buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK);
			bp = g_alloc_bio();
			bp->bio_disk = dp;
			bp->bio_attribute = "GEOM::ident";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			res = dp->d_getattr(bp);
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s",
			    res == 0 ? buf: dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
			bp->bio_attribute = "GEOM::lunid";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunid>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunid>\n");
			}
			bp->bio_attribute = "GEOM::lunname";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunname>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunname>\n");
			}
			g_destroy_bio(bp);
			g_free(buf);
		} else {
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s", dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
		}
		sbuf_printf(sb, "%s<descr>", indent);
		g_conf_printf_escaped(sb, "%s", dp->d_descr);
		sbuf_printf(sb, "</descr>\n");
	}
}

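/*
 * Event handler for disk_resize().  Runs with the topology lock held;
 * providers whose sector size no longer matches the disk are withered,
 * the rest are resized to the new media size.
 */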
static void
g_disk_resize(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_provider *pp;

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();

	dp = ptr;
	gp = dp->d_geom;

	if (dp->d_destroyed || gp == NULL)
		return;

	LIST_FOREACH(pp, &gp->provider, provider) {
		if (pp->sectorsize != 0 &&
		    pp->sectorsize != dp->d_sectorsize)
			g_wither_provider(pp, ENXIO);
		else
			g_resize_provider(pp, dp->d_mediasize);
	}
}

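/*
 * Event handler for disk_create(): instantiate the geom and provider
 * for a new disk, hook up devstat and the per-disk sysctl tree, and
 * publish the provider.  The d_init_level/d_goneflag handshake with
 * disk_gone() covers disks that disappear while this event is still
 * queued or in progress.
 */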
static void
g_disk_create(void *arg, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char tmpstr[80];

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();
	dp = arg;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_START;

	/*
	 * If the disk has already gone away, we can just stop here and
	 * call the user's callback to tell him we've cleaned things up.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		if (dp->d_gone != NULL)
			dp->d_gone(dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF);
	mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
	sc->dp = dp;
	gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s", gp->name);
	devstat_remove_entry(pp->stat);
	pp->stat = NULL;
	dp->d_devstat->id = pp;
	pp->mediasize = dp->d_mediasize;
	pp->sectorsize = dp->d_sectorsize;
	pp->stripeoffset = dp->d_stripeoffset;
	pp->stripesize = dp->d_stripesize;
	if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
	if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0)
		pp->flags |= G_PF_DIRECT_SEND;
	pp->flags |= G_PF_DIRECT_RECEIVE;
	if (bootverbose)
		printf("GEOM: new disk %s\n", gp->name);
	sysctl_ctx_init(&sc->sysctl_ctx);
	snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name,
		CTLFLAG_RD, 0, tmpstr);
	if (sc->sysctl_tree != NULL) {
		SYSCTL_ADD_STRING(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led",
		    CTLFLAG_RWTUN, sc->led, sizeof(sc->led),
		    "LED name");
	}
	pp->private = sc;
	dp->d_geom = gp;
	g_error_provider(pp, 0);

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_DONE;

	/*
	 * If the disk has gone away at this stage, start the withering
	 * process for it.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		g_wither_provider(pp, ENXIO);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

}

/*
 * We get this callback after all of the consumers have gone away, and just
 * before the provider is freed.  If the disk driver provided a d_gone
 * callback, let them know that it is okay to free resources -- they won't
 * be getting any more accesses from GEOM.
 */
static void
g_disk_providergone(struct g_provider *pp)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = (struct g_disk_softc *)pp->private;
	dp = sc->dp;
	if (dp != NULL && dp->d_gone != NULL)
		dp->d_gone(dp);
	if (sc->sysctl_tree != NULL) {
		sysctl_ctx_free(&sc->sysctl_ctx);
		sc->sysctl_tree = NULL;
	}
	if (sc->led[0] != 0) {
		led_set(sc->led, "0");
		sc->led[0] = 0;
	}
	pp->private = NULL;
	pp->geom->softc = NULL;
	mtx_destroy(&sc->done_mtx);
	mtx_destroy(&sc->start_mtx);
	g_free(sc);
}

static void
g_disk_destroy(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_disk_softc *sc;

	g_topology_assert();
	dp = ptr;
	gp = dp->d_geom;
	if (gp != NULL) {
		sc = gp->softc;
		if (sc != NULL)
			sc->dp = NULL;
		dp->d_geom = NULL;
		g_wither_geom(gp, ENXIO);
	}

	g_free(dp);
}

/*
 * We only allow printable characters in disk ident,
 * the rest is converted to 'x<HH>'.
 */
static void
g_disk_ident_adjust(char *ident, size_t size)
{
	char *p, tmp[4], newid[DISK_IDENT_SIZE];

	newid[0] = '\0';
	for (p = ident; *p != '\0'; p++) {
		if (isprint(*p)) {
			tmp[0] = *p;
			tmp[1] = '\0';
		} else {
			snprintf(tmp, sizeof(tmp), "x%02hhx",
			    *(unsigned char *)p);
		}
		if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid))
			break;
	}
	bzero(ident, size);
	strlcpy(ident, newid, size);
}

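/*
 * Consumer-facing disk(9) API follows.  As an illustrative sketch (not
 * part of this file; field names other than those used below are
 * hypothetical), a typical driver attach path looks roughly like:
 *
 *	dp = disk_alloc();
 *	dp->d_name = "mydisk";			(hypothetical name)
 *	dp->d_unit = unit;
 *	dp->d_strategy = mydisk_strategy;	(hypothetical method)
 *	dp->d_sectorsize = 512;
 *	dp->d_mediasize = mediasize;
 *	dp->d_maxsize = maxio;
 *	disk_create(dp, DISK_VERSION);
 *
 * with disk_gone() and/or disk_destroy() on detach; the exact fields
 * and ordering vary by driver, see disk(9).
 */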
struct disk *
disk_alloc(void)
{

	return (g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO));
}

void
disk_create(struct disk *dp, int version)
{

	if (version != DISK_VERSION) {
		printf("WARNING: Attempt to add disk %s%d %s",
		    dp->d_name, dp->d_unit,
		    " using incompatible ABI version of disk(9)\n");
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	if (dp->d_flags & DISKFLAG_RESERVED) {
		printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n",
		    dp->d_name, dp->d_unit);
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy"));
	KASSERT(dp->d_name != NULL, ("disk_create need d_name"));
	KASSERT(*dp->d_name != 0, ("disk_create need d_name"));
	KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long"));
	if (dp->d_devstat == NULL)
		dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit,
		    dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
		    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	dp->d_geom = NULL;

	dp->d_init_level = DISK_INIT_NONE;

	g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident));
	g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL);
}

void
disk_destroy(struct disk *dp)
{

	g_cancel_event(dp);
	dp->d_destroyed = 1;
	if (dp->d_devstat != NULL)
		devstat_remove_entry(dp->d_devstat);
	g_post_event(g_disk_destroy, dp, M_WAITOK, NULL);
}

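/*
 * Mark the disk as gone.  If disk_create()'s event has not finished yet
 * (init level below DISK_INIT_DONE), g_disk_create() will notice
 * d_goneflag and clean up by itself; otherwise wither the provider
 * here.
 */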
void
disk_gone(struct disk *dp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_goneflag = 1;

	/*
	 * If we're still in the process of creating this disk (the
	 * g_disk_create() function is still queued, or is in
	 * progress), the init level will not yet be DISK_INIT_DONE.
	 *
	 * If that is the case, g_disk_create() will see d_goneflag
	 * and take care of cleaning things up.
	 *
	 * If the disk has already been created, we default to
	 * withering the provider as usual below.
	 *
	 * If the caller has not set a d_gone() callback, it will not
	 * be any worse off for returning here, because the geom has
	 * not been fully set up in any case.
	 */
	if (dp->d_init_level < DISK_INIT_DONE) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_wither_provider(pp, ENXIO);
		}
	}
}

void
disk_attr_changed(struct disk *dp, const char *attr, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char devnamebuf[128];

	gp = dp->d_geom;
	if (gp != NULL)
		LIST_FOREACH(pp, &gp->provider, provider)
			(void)g_attr_changed(pp, attr, flag);
	snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name,
	    dp->d_unit);
	devctl_notify("GEOM", "disk", attr, devnamebuf);
}

void
disk_media_changed(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_changed(pp, flag);
		}
	}
}

void
disk_media_gone(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_gone(pp, flag);
		}
	}
}

int
disk_resize(struct disk *dp, int flag)
{

	if (dp->d_destroyed || dp->d_geom == NULL)
		return (0);

	return (g_post_event(g_disk_resize, dp, flag, NULL));
}

static void
g_kern_disks(void *p, int flag __unused)
{
	struct sbuf *sb;
	struct g_geom *gp;
	char *sp;

	sb = p;
	sp = "";
	g_topology_assert();
	LIST_FOREACH(gp, &g_disk_class.geom, geom) {
		sbuf_printf(sb, "%s%s", sp, gp->name);
		sp = " ";
	}
	sbuf_finish(sb);
}

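/*
 * kern.disks sysctl: build a space-separated list of the disk geoms in
 * this class via the GEOM event thread and hand it back to userland.
 */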
static int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct sbuf *sb;

	sb = sbuf_new_auto();
	g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_disks, "A", "names of available disks");