xref: /freebsd/sys/geom/part/g_part.c (revision 94942af266ac119ede0ca836f9aa5a5ac0582938)
1 /*-
2  * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/part/g_part.h>
47 
48 #include "g_part_if.h"
49 
/*
 * Empty method table for the "null" scheme below; it implements no
 * kobj methods at all.
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};
53 
/*
 * The "null" partitioning scheme: a placeholder used when no real
 * scheme matches the medium (see g_part_probe()) and after a table
 * has been destroyed (see g_part_ctl_destroy()).
 */
static struct g_part_scheme g_part_null_scheme = {
	"n/a",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
G_PART_SCHEME_DECLARE(g_part_null_scheme);
60 
61 SET_DECLARE(g_part_scheme_set, struct g_part_scheme);
62 
/*
 * Mapping between partition-type alias strings and their enum values.
 * The array is sized to G_PART_ALIAS_COUNT, so every alias must have
 * exactly one entry here.
 */
struct g_part_alias_list {
	const char *lexeme;		/* Human-readable alias string. */
	enum g_part_alias alias;	/* Corresponding enum value. */
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "mbr", G_PART_ALIAS_MBR }
};
74 
75 /*
76  * The GEOM partitioning class.
77  */
78 static g_ctl_req_t g_part_ctlreq;
79 static g_ctl_destroy_geom_t g_part_destroy_geom;
80 static g_taste_t g_part_taste;
81 
82 static g_access_t g_part_access;
83 static g_dumpconf_t g_part_dumpconf;
84 static g_orphan_t g_part_orphan;
85 static g_spoiled_t g_part_spoiled;
86 static g_start_t g_part_start;
87 
/* The PART class descriptor: hooks the methods above into GEOM. */
static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

/* Register the class with GEOM at module load time. */
DECLARE_GEOM_CLASS(g_part_class, g_part);
104 
/*
 * Control requests (verbs) understood by g_part_ctlreq().
 * G_PART_CTL_NONE marks an unrecognized verb.
 */
enum g_part_ctl {
	G_PART_CTL_NONE,
	G_PART_CTL_ADD,
	G_PART_CTL_COMMIT,
	G_PART_CTL_CREATE,
	G_PART_CTL_DELETE,
	G_PART_CTL_DESTROY,
	G_PART_CTL_MODIFY,
	G_PART_CTL_MOVE,
	G_PART_CTL_RECOVER,
	G_PART_CTL_RESIZE,
	G_PART_CTL_UNDO
};
118 
119 /*
120  * Support functions.
121  */
122 
123 static void g_part_wither(struct g_geom *, int);
124 
125 const char *
126 g_part_alias_name(enum g_part_alias alias)
127 {
128 	int i;
129 
130 	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
131 		if (g_part_alias_list[i].alias != alias)
132 			continue;
133 		return (g_part_alias_list[i].lexeme);
134 	}
135 
136 	return (NULL);
137 }
138 
/*
 * Look up the entry with the given index in the (index-sorted) entry
 * list, or allocate and link a new one if it does not exist.  In both
 * cases the entry's start and end sectors are (re)set to the given
 * values.  A newly allocated entry is zero-filled and sized according
 * to the scheme's per-entry size.
 */
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			/* Passed the insertion point; no such entry. */
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		/* Allocate a new entry and keep the list index-sorted. */
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	}
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
168 
/*
 * Create (or refresh) the GEOM provider for a partition entry and
 * announce it error-free.  The provider name is the geom's name with
 * the scheme-specific partition suffix appended.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	char buf[32];
	struct g_consumer *cp;
	struct g_provider *pp;

	/* Our single consumer is attached to the underlying provider. */
	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Byte offset of the partition on the underlying medium. */
	entry->gpe_offset = entry->gpe_start * pp->sectorsize;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	/* Propagate only the CANDELETE capability from the parent. */
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	if (pp->stripesize > 0) {
		/* Keep stripe alignment relative to the parent provider. */
		entry->gpe_pp->stripesize = pp->stripesize;
		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
		    entry->gpe_offset) % pp->stripesize;
	}
	g_error_provider(entry->gpe_pp, 0);
}
199 
200 static int
201 g_part_parm_geom(const char *p, struct g_geom **v)
202 {
203 	struct g_geom *gp;
204 
205 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
206 		if (!strcmp(p, gp->name))
207 			break;
208 	}
209 	if (gp == NULL)
210 		return (EINVAL);
211 	*v = gp;
212 	return (0);
213 }
214 
215 static int
216 g_part_parm_provider(const char *p, struct g_provider **v)
217 {
218 	struct g_provider *pp;
219 
220 	pp = g_provider_by_name(p);
221 	if (pp == NULL)
222 		return (EINVAL);
223 	*v = pp;
224 	return (0);
225 }
226 
227 static int
228 g_part_parm_quad(const char *p, quad_t *v)
229 {
230 	char *x;
231 	quad_t q;
232 
233 	q = strtoq(p, &x, 0);
234 	if (*x != '\0' || q < 0)
235 		return (EINVAL);
236 	*v = q;
237 	return (0);
238 }
239 
240 static int
241 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
242 {
243 	struct g_part_scheme **iter, *s;
244 
245 	s = NULL;
246 	SET_FOREACH(iter, g_part_scheme_set) {
247 		if ((*iter)->name == NULL)
248 			continue;
249 		if (!strcasecmp((*iter)->name, p)) {
250 			s = *iter;
251 			break;
252 		}
253 	}
254 	if (s == NULL)
255 		return (EINVAL);
256 	*v = s;
257 	return (0);
258 }
259 
/*
 * Parse a string parameter: any non-empty string is accepted as-is.
 * Returns EINVAL for the empty string.
 */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (*p == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}
269 
270 static int
271 g_part_parm_uint(const char *p, u_int *v)
272 {
273 	char *x;
274 	long l;
275 
276 	l = strtol(p, &x, 0);
277 	if (*x != '\0' || l < 0 || l > INT_MAX)
278 		return (EINVAL);
279 	*v = (unsigned int)l;
280 	return (0);
281 }
282 
/*
 * Find the partitioning scheme that best matches the medium by asking
 * every registered scheme to probe it.  A probe result of 0 means a
 * definite match, negative values express decreasing confidence and
 * positive values indicate an error.  On success gp->softc holds the
 * table of the winning scheme; ENXIO is returned when nothing matched.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme **iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Start from the current table's scheme, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme;
	pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) :
	    INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = &g_part_null_scheme;
		pri = INT_MIN;
	}

	/* Try every registered scheme and keep the best (highest) probe. */
	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter) == &g_part_null_scheme)
			continue;
		/* A scheme's kobj class doubles as its table allocator. */
		table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = *iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* New best match: replace any previous table. */
			pri = probe;
			scheme = *iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == &g_part_null_scheme) ? ENXIO : 0);
}
325 
326 /*
327  * Control request functions.
328  */
329 
/*
 * Handle the 'add' verb: create a new partition entry with the given
 * start, size and (optional) index, after validating that it lies
 * within the table's usable range and does not overlap any existing
 * live entry.  A previously deleted entry with the same index is
 * reused instead of allocating a new one.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Range checks against the table's usable sector window. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the entry list to (a) find a deleted entry with the
	 * requested index for reuse, (b) advance the candidate index
	 * past occupied slots, and (c) detect overlap with live entries.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index) {
			/* Slot taken; try the next index. */
			index = entry->gpe_index + 1;
			last = entry;
		}
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	/* An explicitly requested index must still be free. */
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}

	/* Reuse the deleted entry if we found one, else allocate. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Link the new entry, keeping the list index-sorted. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s added\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
434 
/*
 * Handle the 'commit' verb: scrub any sectors recorded in the
 * head/tail scrub maps, write the in-core partition table back to the
 * medium, purge deleted entries and drop the extra access reference
 * taken when the table was opened for modification.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		/* Nothing was opened for modification. */
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/*
		 * Zero out the sectors flagged in the scrub maps: bit i
		 * of gpt_smhead is the i'th sector from the start of the
		 * medium, bit i of gpt_smtail the i'th from the end.
		 */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		/* Committing a destroyed table: drop the geom entirely. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* The table is clean now: purge deleted entries, clear flags. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
511 
/*
 * Handle the 'create' verb: put a new, empty partition table of the
 * requested scheme on the given provider.  If a PART geom with a
 * "null" table already exists on the provider (i.e. a destroyed but
 * uncommitted table), it is reused; otherwise a new geom and consumer
 * are created and opened for writing.  The geom is stored back into
 * gpp so a 'C' flag can trigger an automatic commit.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		/* Only a geom holding a null (destroyed) table is reusable. */
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	/* Validate the optional entry count against the scheme's limits. */
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	/* Allocate the scheme-specific table object for this geom. */
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* Fresh geom: attach and open the consumer for writing. */
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Reused geom: inherit open state and scrub maps. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s created\n", gp->name);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* We created the geom ourselves; tear it down again. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Restore the original null table on the reused geom. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
623 
/*
 * Handle the 'delete' verb: remove the partition entry with the given
 * index.  The entry's provider must not be open.  An entry that was
 * created since the last commit is freed outright; a committed entry
 * is only marked deleted so 'undo' can restore it.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	/* Find the live (non-deleted) entry with the requested index. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* Refuse to delete a partition that is in use. */
	pp = entry->gpe_pp;
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	/* Detach provider and entry from each other before withering. */
	pp->private = NULL;
	entry->gpe_pp = NULL;
	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s deleted\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
678 
/*
 * Handle the 'destroy' verb: remove the partition table from the
 * medium.  All entries must have been deleted first.  The table is
 * replaced by a "null" scheme placeholder (preserving depth, open
 * state and scrub maps) so that the destruction can still be
 * committed or undone.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Any remaining live entry makes the table busy. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Swap in a null-scheme table, carrying over relevant state. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	/* Free the old table and its (all-deleted) entries. */
	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
732 
/*
 * Handle the 'modify' verb: change the type and/or label of the entry
 * with the given index by delegating to the scheme's MODIFY method.
 * Committed entries are flagged modified so the change is written on
 * the next commit.
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	/* Find the live (non-deleted) entry with the requested index. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* A brand-new entry is already pending; no need to flag it. */
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s modified\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
779 
780 static int
781 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
782 {
783 	gctl_error(req, "%d verb 'move'", ENOSYS);
784 	return (ENOSYS);
785 }
786 
787 static int
788 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
789 {
790 	gctl_error(req, "%d verb 'recover'", ENOSYS);
791 	return (ENOSYS);
792 }
793 
794 static int
795 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
796 {
797 	gctl_error(req, "%d verb 'resize'", ENOSYS);
798 	return (ENOSYS);
799 }
800 
/*
 * Handle the 'undo' verb: revert all uncommitted changes.  Entries
 * created since the last commit are destroyed (with their providers);
 * entries marked deleted are freed from the in-core list.  The table
 * is then re-read from the medium -- re-probing first if the table
 * itself was created or destroyed -- and providers are re-created.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		/* Nothing was opened for modification. */
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Uncommitted addition: drop its provider too. */
			pp = entry->gpe_pp;
			pp->private = NULL;
			entry->gpe_pp = NULL;
			g_wither_provider(pp, ENXIO);
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/* An uncommitted create/destroy requires a fresh probe. */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/* No table on the medium: the geom goes away. */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		/* g_part_probe() may have replaced the table. */
		table = gp->softc;
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	/* Re-create providers for the freshly read entries. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
875 
876 static void
877 g_part_wither(struct g_geom *gp, int error)
878 {
879 	struct g_part_entry *entry;
880 	struct g_part_table *table;
881 
882 	table = gp->softc;
883 	if (table != NULL) {
884 		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
885 			LIST_REMOVE(entry, gpe_entry);
886 			g_free(entry);
887 		}
888 		if (gp->softc != NULL) {
889 			kobj_delete((kobj_t)gp->softc, M_GEOM);
890 			gp->softc = NULL;
891 		}
892 	}
893 	g_wither_geom(gp, error);
894 }
895 
896 /*
897  * Class methods.
898  */
899 
/*
 * GEOM control request dispatcher for the PART class.  Maps the verb
 * to a control request, parses and validates all request arguments
 * into a g_part_parms structure, takes write access on the geom's
 * consumer when the verb modifies the table, dispatches to the
 * per-verb handler and finally implements the 'C' (auto-commit) flag.
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	const char *p;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	/*
	 * Determine the control request and, with it, the mandatory
	 * (mparms) and optional (oparms) parameter masks.  'modifies'
	 * records whether the verb changes the partition table.
	 */
	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	/* Parse every request argument into gpp, rejecting unknowns. */
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			break;
		case 'g':
			if (!strcmp(ap->name, "geom"))
				parm = G_PART_PARM_GEOM;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 'p':
			if (!strcmp(ap->name, "provider"))
				parm = G_PART_PARM_PROVIDER;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		/* The parameter must be valid for the chosen verb. */
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		p = gctl_get_asciiparam(req, ap->name);
		if (p == NULL) {
			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
			return;
		}
		/* Convert the ASCII value into its typed gpp field. */
		switch (parm) {
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_uint(p, &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			if (p[0] == '\0')
				continue;
			error = g_part_parm_str(p, &gpp.gpp_flags);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(p, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_uint(p, &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			/* An empty label is always valid. */
			gpp.gpp_label = p;
			error = 0;
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(p, &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(p, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(p, &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(p, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint(p, &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			break;
		}
		if (error) {
			gctl_error(req, "%d %s '%s'", error, ap->name, p);
			return;
		}
		gpp.gpp_parms |= parm;
	}
	/* All mandatory parameters must have been supplied. */
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;	/* Suppress uninit. warning. */
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Dispatch to the per-verb handler. */
	error = EDOOFUS;	/* Prevent bogus  uninit. warning. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			/*
			 * NOTE(review): KASSERT's second argument is a
			 * printf-style parenthesized list; (__func__) is
			 * used as the format string here -- ("%s", __func__)
			 * would be safer.  Confirm before changing.
			 */
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

	/* Undo the access bump taken above if the handler failed. */
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}
1172 
/*
 * Class method: destroy the given geom on request from the GEOM control
 * layer. The actual teardown is delegated to g_part_wither(); EINVAL is
 * the error posted on the withering geom's providers.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
1184 
/*
 * Taste method: probe the given provider for a supported partitioning
 * scheme and, if one is found, instantiate a partitioning geom on top
 * of it with one provider per partition entry. Returns NULL if the
 * provider is not (or cannot be) partitioned by this class.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		/* Topology lock still held here; no unlock has happened yet. */
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Probing and table reading do I/O; drop the topology lock. */
	g_topology_unlock();

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		/* The provider below us refuses nested partitioning. */
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	/* No PART::depth attribute means we're the outermost table. */
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	/* Have the matched scheme read the on-disk table into memory. */
	table = gp->softc;
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Publish one provider per partition entry that was read. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	/* Drop the temporary read access acquired above. */
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	/* We arrive here with the topology lock dropped; reacquire it. */
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1246 
1247 /*
1248  * Geom methods.
1249  */
1250 
1251 static int
1252 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1253 {
1254 	struct g_consumer *cp;
1255 
1256 	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1257 	    dw, de));
1258 
1259 	cp = LIST_FIRST(&pp->geom->consumer);
1260 
1261 	/* We always gain write-exclusive access. */
1262 	return (g_access(cp, dr, dw, dw + de));
1263 }
1264 
1265 static void
1266 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1267     struct g_consumer *cp, struct g_provider *pp)
1268 {
1269 	char buf[64];
1270 	struct g_part_entry *entry;
1271 	struct g_part_table *table;
1272 
1273 	KASSERT(sb != NULL && gp != NULL, (__func__));
1274 	table = gp->softc;
1275 
1276 	if (indent == NULL) {
1277 		KASSERT(cp == NULL && pp != NULL, (__func__));
1278 		entry = pp->private;
1279 		if (entry == NULL)
1280 			return;
1281 		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
1282 		    (uintmax_t)entry->gpe_offset,
1283 		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
1284 	} else if (cp != NULL) {	/* Consumer configuration. */
1285 		KASSERT(pp == NULL, (__func__));
1286 		/* none */
1287 	} else if (pp != NULL) {	/* Provider configuration. */
1288 		entry = pp->private;
1289 		if (entry == NULL)
1290 			return;
1291 		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
1292 		    entry->gpe_index);
1293 		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1294 		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
1295 		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
1296 		    (uintmax_t)entry->gpe_offset);
1297 		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
1298 		    (uintmax_t)pp->mediasize);
1299 		G_PART_DUMPCONF(table, entry, sb, indent);
1300 	} else {			/* Geom configuration. */
1301 		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
1302 		    table->gpt_scheme->name);
1303 		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
1304 		    table->gpt_entries);
1305 		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
1306 		    (uintmax_t)table->gpt_first);
1307 		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
1308 		    (uintmax_t)table->gpt_last);
1309 		G_PART_DUMPCONF(table, NULL, sb, indent);
1310 	}
1311 }
1312 
1313 static void
1314 g_part_orphan(struct g_consumer *cp)
1315 {
1316 	struct g_provider *pp;
1317 
1318 	pp = cp->provider;
1319 	KASSERT(pp != NULL, (__func__));
1320 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
1321 	g_topology_assert();
1322 
1323 	KASSERT(pp->error != 0, (__func__));
1324 	g_part_wither(cp->geom, pp->error);
1325 }
1326 
1327 static void
1328 g_part_spoiled(struct g_consumer *cp)
1329 {
1330 
1331 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
1332 	g_topology_assert();
1333 
1334 	g_part_wither(cp->geom, ENXIO);
1335 }
1336 
/*
 * Start method: handle an I/O request addressed to one of our
 * partition providers. Read/write/delete requests are clamped to the
 * partition, translated into the parent provider's address space and
 * forwarded; a few GETATTR attributes are answered locally; anything
 * else is either cloned through unchanged or rejected.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	int attr;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* A provider without an attached entry is being torn down. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		/* Reject I/O starting at or beyond the partition's end. */
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clamp the transfer to the end of the partition. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Translate partition-relative offset to parent offset. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Fall through to the clone-and-forward code below. */
		break;
	case BIO_GETATTR:
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			/* Clamp the dump region exactly like regular I/O. */
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
			/* Forward the adjusted request to the parent below. */
		} else if (!strcmp("PART::isleaf", bp->bio_attribute)) {
			/* Report whether nesting below us is disallowed. */
			if (bp->bio_length != sizeof(int)) {
				g_io_deliver(bp, EFAULT);
				return;
			}
			attr = table->gpt_isleaf ? 1 : 0;
			bcopy(&attr, bp->bio_data, sizeof(int));
			bp->bio_completed = sizeof(int);
			g_io_deliver(bp, 0);
			return;
		} else if (!strcmp("PART::depth", bp->bio_attribute)) {
			/* Report our nesting depth (see g_part_taste()). */
			if (bp->bio_length != sizeof(int)) {
				g_io_deliver(bp, EFAULT);
				return;
			}
			bcopy(&table->gpt_depth, bp->bio_data, sizeof(int));
			bp->bio_completed = sizeof(int);
			g_io_deliver(bp, 0);
			return;
		}
		/* Unrecognized attributes are passed down unchanged. */
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Clone-and-forward path for BIO_FLUSH and pass-through GETATTRs. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
1437