xref: /freebsd/sys/geom/part/g_part.c (revision 2be1a816b9ff69588e55be0a84cbe2a31efc0f2f)
1 /*-
2  * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/geom_int.h>
47 #include <geom/part/g_part.h>
48 
49 #include "g_part_if.h"
50 
/*
 * Empty kobj method table for the "null" scheme (see below); every
 * scheme method resolves to the kobj default.
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};
54 
/*
 * The "null" scheme: a placeholder table attached to a geom after its
 * real partition table has been destroyed, keeping the geom alive
 * until the destruction is committed or undone.
 */
static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
60 
/* Registry of all partitioning schemes known to this kernel. */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
63 
/*
 * Mapping between the user-visible partition type names (as used by
 * gpart(8)) and the scheme-independent alias enumeration.
 */
struct g_part_alias_list {
	const char *lexeme;		/* Canonical name of the alias. */
	enum g_part_alias alias;	/* Corresponding enum value. */
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "mbr", G_PART_ALIAS_MBR }
};
77 
78 /*
79  * The GEOM partitioning class.
80  */
/* Class-level method prototypes. */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

/* Geom-level method prototypes. */
static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
92 
/* Definition of the PART class: ties the methods above into GEOM. */
static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};
109 
/* Register the PART class with GEOM at module load time. */
DECLARE_GEOM_CLASS(g_part_class, g_part);
111 
/*
 * Control verbs accepted by the PART class; decoded from the gctl
 * request's "verb" argument in g_part_ctlreq().
 */
enum g_part_ctl {
	G_PART_CTL_NONE,
	G_PART_CTL_ADD,
	G_PART_CTL_BOOTCODE,
	G_PART_CTL_COMMIT,
	G_PART_CTL_CREATE,
	G_PART_CTL_DELETE,
	G_PART_CTL_DESTROY,
	G_PART_CTL_MODIFY,
	G_PART_CTL_MOVE,
	G_PART_CTL_RECOVER,
	G_PART_CTL_RESIZE,
	G_PART_CTL_UNDO
};
126 
/*
 * Support functions.
 */

/* Tear down a partitioning geom and its table (defined below). */
static void g_part_wither(struct g_geom *, int);
132 
133 const char *
134 g_part_alias_name(enum g_part_alias alias)
135 {
136 	int i;
137 
138 	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
139 		if (g_part_alias_list[i].alias != alias)
140 			continue;
141 		return (g_part_alias_list[i].lexeme);
142 	}
143 
144 	return (NULL);
145 }
146 
147 void
148 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
149     u_int *bestheads)
150 {
151 	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
152 	off_t chs, cylinders;
153 	u_int heads;
154 	int idx;
155 
156 	*bestchs = 0;
157 	*bestheads = 0;
158 	for (idx = 0; candidate_heads[idx] != 0; idx++) {
159 		heads = candidate_heads[idx];
160 		cylinders = blocks / heads / sectors;
161 		if (cylinders < heads || cylinders < sectors)
162 			break;
163 		if (cylinders > 1023)
164 			continue;
165 		chs = cylinders * heads * sectors;
166 		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
167 			*bestchs = chs;
168 			*bestheads = heads;
169 		}
170 	}
171 }
172 
/*
 * Synthesize a CHS geometry for a table backed by the given consumer.
 * When the provider reports sane firmware geometry attributes those
 * are adopted verbatim (gpt_fixgeom = 1); otherwise the best
 * head/sector combination is searched for across a fixed candidate
 * sector set.
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 ||
	    sectors < 1 || sectors > 63 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 ||
	    heads < 1 || heads > 255) {
		/* No usable firmware geometry; compute one ourselves. */
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		/* Trust the firmware-provided geometry. */
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
221 
222 struct g_part_entry *
223 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
224     quad_t end)
225 {
226 	struct g_part_entry *entry, *last;
227 
228 	last = NULL;
229 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
230 		if (entry->gpe_index == index)
231 			break;
232 		if (entry->gpe_index > index) {
233 			entry = NULL;
234 			break;
235 		}
236 		last = entry;
237 	}
238 	if (entry == NULL) {
239 		entry = g_malloc(table->gpt_scheme->gps_entrysz,
240 		    M_WAITOK | M_ZERO);
241 		entry->gpe_index = index;
242 		if (last == NULL)
243 			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
244 		else
245 			LIST_INSERT_AFTER(last, entry, gpe_entry);
246 	}
247 	entry->gpe_start = start;
248 	entry->gpe_end = end;
249 	return (entry);
250 }
251 
/*
 * Create (or refresh) the GEOM provider for a single partition entry.
 * The provider is named by appending the scheme-specific partition
 * suffix to the geom's name and inherits sector size and stripe
 * parameters from the underlying provider.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	char buf[32];
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Byte offset of the partition on the underlying provider. */
	entry->gpe_offset = entry->gpe_start * pp->sectorsize;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	if (pp->stripesize > 0) {
		/* Propagate striping, rebased to the partition's offset. */
		entry->gpe_pp->stripesize = pp->stripesize;
		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
		    entry->gpe_offset) % pp->stripesize;
	}
	g_error_provider(entry->gpe_pp, 0);	/* Mark the provider usable. */
}
282 
283 static int
284 g_part_parm_geom(const char *p, struct g_geom **v)
285 {
286 	struct g_geom *gp;
287 
288 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
289 		if (!strcmp(p, gp->name))
290 			break;
291 	}
292 	if (gp == NULL)
293 		return (EINVAL);
294 	*v = gp;
295 	return (0);
296 }
297 
298 static int
299 g_part_parm_provider(const char *p, struct g_provider **v)
300 {
301 	struct g_provider *pp;
302 
303 	pp = g_provider_by_name(p);
304 	if (pp == NULL)
305 		return (EINVAL);
306 	*v = pp;
307 	return (0);
308 }
309 
310 static int
311 g_part_parm_quad(const char *p, quad_t *v)
312 {
313 	char *x;
314 	quad_t q;
315 
316 	q = strtoq(p, &x, 0);
317 	if (*x != '\0' || q < 0)
318 		return (EINVAL);
319 	*v = q;
320 	return (0);
321 }
322 
323 static int
324 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
325 {
326 	struct g_part_scheme *s;
327 
328 	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
329 		if (s == &g_part_null_scheme)
330 			continue;
331 		if (!strcasecmp(s->name, p))
332 			break;
333 	}
334 	if (s == NULL)
335 		return (EINVAL);
336 	*v = s;
337 	return (0);
338 }
339 
/*
 * Validate a string parameter: any non-empty string is accepted
 * verbatim; the empty string yields EINVAL.
 */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (*p == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}
349 
350 static int
351 g_part_parm_uint(const char *p, u_int *v)
352 {
353 	char *x;
354 	long l;
355 
356 	l = strtol(p, &x, 0);
357 	if (*x != '\0' || l < 0 || l > INT_MAX)
358 		return (EINVAL);
359 	*v = (unsigned int)l;
360 	return (0);
361 }
362 
/*
 * Find the partitioning scheme that best matches the media read via
 * the given consumer. A scheme's probe method returns 0 for a perfect
 * match, a negative value for a partial match (closer to 0 is better)
 * and a positive value on error. On success, gp->softc holds the
 * winning scheme's table and 0 is returned; ENXIO means no scheme
 * recognized the media.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Start from the currently attached scheme, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;	/* Still a perfect match; keep it. */
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		/* Instantiate a table object so the scheme can probe. */
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Best candidate so far; supersede the previous one. */
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;	/* Perfect match found. */
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}
404 
405 /*
406  * Control request functions.
407  */
408 
/*
 * Handle the "add" verb: add a partition entry to an opened table.
 * Validates the requested start/size/index against the table's limits
 * and the existing entries, defers scheme-specific checks to
 * G_PART_ADD() and, on success, instantiates the provider for the new
 * partition.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* The partition must lie within the table's usable sector range. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the index-sorted entry list to find a usable index, a
	 * deleted entry that can be recycled (delent) and the entry to
	 * insert after (last), checking live entries for overlap with
	 * the requested range as we go.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;	/* Index taken; try next. */
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;	/* Internal entries don't count for overlap. */
		/* Requested start falls inside an existing partition. */
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		/* Requested end falls inside an existing partition. */
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		/* Requested range fully encloses an existing partition. */
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	/* An explicitly requested index must have been available. */
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}

	/* Recycle a deleted entry when possible, else allocate anew. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Link the fresh entry in, keeping the list index-sorted. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s added\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
516 
517 static int
518 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
519 {
520 	struct g_geom *gp;
521 	struct g_part_table *table;
522 	struct sbuf *sb;
523 	int error, sz;
524 
525 	gp = gpp->gpp_geom;
526 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
527 	g_topology_assert();
528 
529 	table = gp->softc;
530 	sz = table->gpt_scheme->gps_bootcodesz;
531 	if (sz == 0) {
532 		error = ENODEV;
533 		goto fail;
534 	}
535 	if (gpp->gpp_codesize != sz) {
536 		error = EINVAL;
537 		goto fail;
538 	}
539 
540 	error = G_PART_BOOTCODE(table, gpp);
541 	if (error)
542 		goto fail;
543 
544 	/* Provide feedback if so requested. */
545 	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
546 		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
547 		sbuf_printf(sb, "%s has bootcode\n", gp->name);
548 		sbuf_finish(sb);
549 		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
550 		sbuf_delete(sb);
551 	}
552 	return (0);
553 
554  fail:
555 	gctl_error(req, "%d", error);
556 	return (error);
557 }
558 
/*
 * Handle the "commit" verb: flush all pending changes to the media.
 * First zero-fills the sectors recorded in the scrub bitmaps
 * (gpt_smhead indexes sectors from the start of the media, gpt_smtail
 * from the end), then writes the table via G_PART_WRITE() and retires
 * entries deleted since the table was opened.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		/* Nothing was opened for modification. */
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/* Scrub (zero-fill) the sectors flagged in the bitmaps. */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;	/* Sector i from start. */
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;	/* Sector i from end. */
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		/* Committing a destroy: the geom can go away now. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Reclaim deleted entries; reset flags on the surviving ones. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);	/* Drop the exclusive open. */
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
635 
/*
 * Handle the "create" verb: put a new, empty partition table of the
 * requested scheme on a provider. If the provider already carries a
 * PART geom it must hold the null scheme (a destroyed table awaiting
 * commit), in which case that geom is reused.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	/* The requested entry count must fit the scheme's limits. */
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* Fresh geom: attach to the provider and open it r/w/e. */
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Reused geom: carry over state from the null table. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/* If we're nested, get the absolute sector offset on disk. */
	if (table->gpt_depth) {
		error = g_getattr("PART::offset", cp, &attr);
		if (error)
			goto fail;
		table->gpt_offset = attr;
	}

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* Fresh geom: undo the attach/open and wither it. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Reused geom: restore the null table. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
770 
/*
 * Handle the "delete" verb: remove the partition with the given index.
 * Fails with EBUSY while the partition's provider is open. Entries
 * created since the table was opened are freed immediately;
 * pre-existing ones are only marked deleted so the change can still
 * be undone.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	/* Find the live entry with the requested index. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		/* Refuse to delete a partition that is still open. */
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (entry->gpe_created) {
		/* Never committed; the entry can simply be freed. */
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		/* Keep the entry around so the deletion can be undone. */
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s deleted\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
831 
/*
 * Handle the "destroy" verb: destroy the partition table as a whole.
 * Only allowed when no live entries remain. The scheme's table is
 * replaced by an instance of the null scheme so the geom stays around
 * until the destruction is committed or undone.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Any live (non-deleted, non-internal) entry blocks destruction. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Swap in a null-scheme table, inheriting the open/scrub state. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	/* Release the old table and all of its entries. */
	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
886 
887 static int
888 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
889 {
890 	char buf[32];
891 	struct g_geom *gp;
892 	struct g_part_entry *entry;
893 	struct g_part_table *table;
894 	struct sbuf *sb;
895 	int error;
896 
897 	gp = gpp->gpp_geom;
898 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
899 	g_topology_assert();
900 
901 	table = gp->softc;
902 
903 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
904 		if (entry->gpe_deleted || entry->gpe_internal)
905 			continue;
906 		if (entry->gpe_index == gpp->gpp_index)
907 			break;
908 	}
909 	if (entry == NULL) {
910 		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
911 		return (ENOENT);
912 	}
913 
914 	error = G_PART_MODIFY(table, entry, gpp);
915 	if (error) {
916 		gctl_error(req, "%d", error);
917 		return (error);
918 	}
919 
920 	if (!entry->gpe_created)
921 		entry->gpe_modified = 1;
922 
923 	/* Provide feedback if so requested. */
924 	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
925 		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
926 		sbuf_printf(sb, "%s%s modified\n", gp->name,
927 		    G_PART_NAME(table, entry, buf, sizeof(buf)));
928 		sbuf_finish(sb);
929 		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
930 		sbuf_delete(sb);
931 	}
932 	return (0);
933 }
934 
/*
 * Handle the "move" verb. Moving partitions is not implemented;
 * always fails with ENOSYS.
 */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
941 
/*
 * Handle the "recover" verb. Table recovery is not implemented;
 * always fails with ENOSYS.
 */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'recover'", ENOSYS);
	return (ENOSYS);
}
948 
/*
 * Handle the "resize" verb. Resizing partitions is not implemented;
 * always fails with ENOSYS.
 */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'resize'", ENOSYS);
	return (ENOSYS);
}
955 
/*
 * Handle the "undo" verb: discard all uncommitted changes. Entries
 * created in this session lose their providers and are freed, deleted
 * entries are reclaimed, and the on-disk table is re-read (after
 * re-probing when the table itself was created or destroyed).
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		/* Nothing was opened for modification. */
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Created in this session: drop its provider too. */
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/* Re-probe if the table itself was created or destroyed. */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/* No recognizable table on disk: let the geom go. */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;	/* g_part_probe() may have replaced it. */
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	/* Re-create providers from the freshly read on-disk state. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);	/* Drop the exclusive open. */
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
1034 
1035 static void
1036 g_part_wither(struct g_geom *gp, int error)
1037 {
1038 	struct g_part_entry *entry;
1039 	struct g_part_table *table;
1040 
1041 	table = gp->softc;
1042 	if (table != NULL) {
1043 		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1044 			LIST_REMOVE(entry, gpe_entry);
1045 			g_free(entry);
1046 		}
1047 		if (gp->softc != NULL) {
1048 			kobj_delete((kobj_t)gp->softc, M_GEOM);
1049 			gp->softc = NULL;
1050 		}
1051 	}
1052 	g_wither_geom(gp, error);
1053 }
1054 
1055 /*
1056  * Class methods.
1057  */
1058 
1059 static void
1060 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1061 {
1062 	struct g_part_parms gpp;
1063 	struct g_part_table *table;
1064 	struct gctl_req_arg *ap;
1065 	const char *p;
1066 	enum g_part_ctl ctlreq;
1067 	unsigned int i, mparms, oparms, parm;
1068 	int auto_commit, close_on_error;
1069 	int error, len, modifies;
1070 
1071 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1072 	g_topology_assert();
1073 
1074 	ctlreq = G_PART_CTL_NONE;
1075 	modifies = 1;
1076 	mparms = 0;
1077 	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1078 	switch (*verb) {
1079 	case 'a':
1080 		if (!strcmp(verb, "add")) {
1081 			ctlreq = G_PART_CTL_ADD;
1082 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1083 			    G_PART_PARM_START | G_PART_PARM_TYPE;
1084 			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1085 		}
1086 		break;
1087 	case 'b':
1088 		if (!strcmp(verb, "bootcode")) {
1089 			ctlreq = G_PART_CTL_BOOTCODE;
1090 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1091 		}
1092 		break;
1093 	case 'c':
1094 		if (!strcmp(verb, "commit")) {
1095 			ctlreq = G_PART_CTL_COMMIT;
1096 			mparms |= G_PART_PARM_GEOM;
1097 			modifies = 0;
1098 		} else if (!strcmp(verb, "create")) {
1099 			ctlreq = G_PART_CTL_CREATE;
1100 			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1101 			oparms |= G_PART_PARM_ENTRIES;
1102 		}
1103 		break;
1104 	case 'd':
1105 		if (!strcmp(verb, "delete")) {
1106 			ctlreq = G_PART_CTL_DELETE;
1107 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1108 		} else if (!strcmp(verb, "destroy")) {
1109 			ctlreq = G_PART_CTL_DESTROY;
1110 			mparms |= G_PART_PARM_GEOM;
1111 		}
1112 		break;
1113 	case 'm':
1114 		if (!strcmp(verb, "modify")) {
1115 			ctlreq = G_PART_CTL_MODIFY;
1116 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1117 			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1118 		} else if (!strcmp(verb, "move")) {
1119 			ctlreq = G_PART_CTL_MOVE;
1120 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1121 		}
1122 		break;
1123 	case 'r':
1124 		if (!strcmp(verb, "recover")) {
1125 			ctlreq = G_PART_CTL_RECOVER;
1126 			mparms |= G_PART_PARM_GEOM;
1127 		} else if (!strcmp(verb, "resize")) {
1128 			ctlreq = G_PART_CTL_RESIZE;
1129 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1130 		}
1131 		break;
1132 	case 'u':
1133 		if (!strcmp(verb, "undo")) {
1134 			ctlreq = G_PART_CTL_UNDO;
1135 			mparms |= G_PART_PARM_GEOM;
1136 			modifies = 0;
1137 		}
1138 		break;
1139 	}
1140 	if (ctlreq == G_PART_CTL_NONE) {
1141 		gctl_error(req, "%d verb '%s'", EINVAL, verb);
1142 		return;
1143 	}
1144 
1145 	bzero(&gpp, sizeof(gpp));
1146 	for (i = 0; i < req->narg; i++) {
1147 		ap = &req->arg[i];
1148 		parm = 0;
1149 		switch (ap->name[0]) {
1150 		case 'b':
1151 			if (!strcmp(ap->name, "bootcode"))
1152 				parm = G_PART_PARM_BOOTCODE;
1153 			break;
1154 		case 'c':
1155 			if (!strcmp(ap->name, "class"))
1156 				continue;
1157 			break;
1158 		case 'e':
1159 			if (!strcmp(ap->name, "entries"))
1160 				parm = G_PART_PARM_ENTRIES;
1161 			break;
1162 		case 'f':
1163 			if (!strcmp(ap->name, "flags"))
1164 				parm = G_PART_PARM_FLAGS;
1165 			break;
1166 		case 'g':
1167 			if (!strcmp(ap->name, "geom"))
1168 				parm = G_PART_PARM_GEOM;
1169 			break;
1170 		case 'i':
1171 			if (!strcmp(ap->name, "index"))
1172 				parm = G_PART_PARM_INDEX;
1173 			break;
1174 		case 'l':
1175 			if (!strcmp(ap->name, "label"))
1176 				parm = G_PART_PARM_LABEL;
1177 			break;
1178 		case 'o':
1179 			if (!strcmp(ap->name, "output"))
1180 				parm = G_PART_PARM_OUTPUT;
1181 			break;
1182 		case 'p':
1183 			if (!strcmp(ap->name, "provider"))
1184 				parm = G_PART_PARM_PROVIDER;
1185 			break;
1186 		case 's':
1187 			if (!strcmp(ap->name, "scheme"))
1188 				parm = G_PART_PARM_SCHEME;
1189 			else if (!strcmp(ap->name, "size"))
1190 				parm = G_PART_PARM_SIZE;
1191 			else if (!strcmp(ap->name, "start"))
1192 				parm = G_PART_PARM_START;
1193 			break;
1194 		case 't':
1195 			if (!strcmp(ap->name, "type"))
1196 				parm = G_PART_PARM_TYPE;
1197 			break;
1198 		case 'v':
1199 			if (!strcmp(ap->name, "verb"))
1200 				continue;
1201 			else if (!strcmp(ap->name, "version"))
1202 				parm = G_PART_PARM_VERSION;
1203 			break;
1204 		}
1205 		if ((parm & (mparms | oparms)) == 0) {
1206 			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1207 			return;
1208 		}
1209 		if (parm == G_PART_PARM_BOOTCODE)
1210 			p = gctl_get_param(req, ap->name, &len);
1211 		else
1212 			p = gctl_get_asciiparam(req, ap->name);
1213 		if (p == NULL) {
1214 			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
1215 			return;
1216 		}
1217 		switch (parm) {
1218 		case G_PART_PARM_BOOTCODE:
1219 			gpp.gpp_codeptr = p;
1220 			gpp.gpp_codesize = len;
1221 			error = 0;
1222 			break;
1223 		case G_PART_PARM_ENTRIES:
1224 			error = g_part_parm_uint(p, &gpp.gpp_entries);
1225 			break;
1226 		case G_PART_PARM_FLAGS:
1227 			if (p[0] == '\0')
1228 				continue;
1229 			error = g_part_parm_str(p, &gpp.gpp_flags);
1230 			break;
1231 		case G_PART_PARM_GEOM:
1232 			error = g_part_parm_geom(p, &gpp.gpp_geom);
1233 			break;
1234 		case G_PART_PARM_INDEX:
1235 			error = g_part_parm_uint(p, &gpp.gpp_index);
1236 			break;
1237 		case G_PART_PARM_LABEL:
1238 			/* An empty label is always valid. */
1239 			gpp.gpp_label = p;
1240 			error = 0;
1241 			break;
1242 		case G_PART_PARM_OUTPUT:
1243 			error = 0;	/* Write-only parameter */
1244 			break;
1245 		case G_PART_PARM_PROVIDER:
1246 			error = g_part_parm_provider(p, &gpp.gpp_provider);
1247 			break;
1248 		case G_PART_PARM_SCHEME:
1249 			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
1250 			break;
1251 		case G_PART_PARM_SIZE:
1252 			error = g_part_parm_quad(p, &gpp.gpp_size);
1253 			break;
1254 		case G_PART_PARM_START:
1255 			error = g_part_parm_quad(p, &gpp.gpp_start);
1256 			break;
1257 		case G_PART_PARM_TYPE:
1258 			error = g_part_parm_str(p, &gpp.gpp_type);
1259 			break;
1260 		case G_PART_PARM_VERSION:
1261 			error = g_part_parm_uint(p, &gpp.gpp_version);
1262 			break;
1263 		default:
1264 			error = EDOOFUS;
1265 			break;
1266 		}
1267 		if (error) {
1268 			gctl_error(req, "%d %s '%s'", error, ap->name, p);
1269 			return;
1270 		}
1271 		gpp.gpp_parms |= parm;
1272 	}
1273 	if ((gpp.gpp_parms & mparms) != mparms) {
1274 		parm = mparms - (gpp.gpp_parms & mparms);
1275 		gctl_error(req, "%d param '%x'", ENOATTR, parm);
1276 		return;
1277 	}
1278 
1279 	/* Obtain permissions if possible/necessary. */
1280 	close_on_error = 0;
1281 	table = NULL;	/* Suppress uninit. warning. */
1282 	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1283 		table = gpp.gpp_geom->softc;
1284 		if (table != NULL && !table->gpt_opened) {
1285 			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1286 			    1, 1, 1);
1287 			if (error) {
1288 				gctl_error(req, "%d geom '%s'", error,
1289 				    gpp.gpp_geom->name);
1290 				return;
1291 			}
1292 			table->gpt_opened = 1;
1293 			close_on_error = 1;
1294 		}
1295 	}
1296 
1297 	error = EDOOFUS;	/* Prevent bogus  uninit. warning. */
1298 	switch (ctlreq) {
1299 	case G_PART_CTL_NONE:
1300 		panic("%s", __func__);
1301 	case G_PART_CTL_ADD:
1302 		error = g_part_ctl_add(req, &gpp);
1303 		break;
1304 	case G_PART_CTL_BOOTCODE:
1305 		error = g_part_ctl_bootcode(req, &gpp);
1306 		break;
1307 	case G_PART_CTL_COMMIT:
1308 		error = g_part_ctl_commit(req, &gpp);
1309 		break;
1310 	case G_PART_CTL_CREATE:
1311 		error = g_part_ctl_create(req, &gpp);
1312 		break;
1313 	case G_PART_CTL_DELETE:
1314 		error = g_part_ctl_delete(req, &gpp);
1315 		break;
1316 	case G_PART_CTL_DESTROY:
1317 		error = g_part_ctl_destroy(req, &gpp);
1318 		break;
1319 	case G_PART_CTL_MODIFY:
1320 		error = g_part_ctl_modify(req, &gpp);
1321 		break;
1322 	case G_PART_CTL_MOVE:
1323 		error = g_part_ctl_move(req, &gpp);
1324 		break;
1325 	case G_PART_CTL_RECOVER:
1326 		error = g_part_ctl_recover(req, &gpp);
1327 		break;
1328 	case G_PART_CTL_RESIZE:
1329 		error = g_part_ctl_resize(req, &gpp);
1330 		break;
1331 	case G_PART_CTL_UNDO:
1332 		error = g_part_ctl_undo(req, &gpp);
1333 		break;
1334 	}
1335 
1336 	/* Implement automatic commit. */
1337 	if (!error) {
1338 		auto_commit = (modifies &&
1339 		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1340 		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1341 		if (auto_commit) {
1342 			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
1343 			error = g_part_ctl_commit(req, &gpp);
1344 		}
1345 	}
1346 
1347 	if (error && close_on_error) {
1348 		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1349 		table->gpt_opened = 0;
1350 	}
1351 }
1352 
/*
 * Class method: destroy a partitioning geom on behalf of a GEOM
 * control request.  The geom is withered with EINVAL as the error
 * recorded for it; teardown completes as references drain.
 * Always reports success (0) to the caller.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
1364 
/*
 * Taste method: probe the given provider for a supported partitioning
 * scheme.  On success a new PART geom is returned with one provider
 * created per non-internal partition entry; on any failure the geom
 * is withered and NULL is returned.  Called with the topology lock
 * held; the lock is dropped for the I/O-heavy probe/read phase and
 * re-acquired before providers are created.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		/* No read access: nothing to taste; undo the attach. */
		g_part_wither(gp, error);
		return (NULL);
	}

	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		/* The parent declared itself a leaf: do not nest. */
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	/* No PART below us means we are the outermost table (depth 0). */
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/* If we're nested, get the absolute sector offset on disk. */
	if (table->gpt_depth) {
		error = g_getattr("PART::offset", cp, &attr);
		if (error)
			goto fail;
		/*
		 * NOTE(review): attr is an int, so very large absolute
		 * offsets would be truncated here -- confirm the attribute
		 * width against the g_handleattr_int() producer.
		 */
		table->gpt_offset = attr;
	}

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Publish a provider for every externally visible entry. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	/* Drop the read access obtained for tasting. */
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1454 
1455 /*
1456  * Geom methods.
1457  */
1458 
1459 static int
1460 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1461 {
1462 	struct g_consumer *cp;
1463 
1464 	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1465 	    dw, de));
1466 
1467 	cp = LIST_FIRST(&pp->geom->consumer);
1468 
1469 	/* We always gain write-exclusive access. */
1470 	return (g_access(cp, dr, dw, dw + de));
1471 }
1472 
/*
 * Dumpconf method: emit configuration into sb for one of four cases,
 * distinguished by the indent/cp/pp arguments:
 *   indent == NULL          -> one-line "disk ident" style dump for pp
 *   cp != NULL              -> consumer configuration (nothing to add)
 *   pp != NULL              -> XML provider (partition entry) section
 *   otherwise               -> XML geom (table-wide) section
 * Scheme-specific details are appended via G_PART_DUMPCONF().
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		/* Flat one-line form: index, offset and type. */
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
1531 
/*
 * Orphan method: the provider we consume has gone away.  Wither our
 * geom, propagating the provider's error to our own providers.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* An orphaned provider is expected to carry a non-zero error. */
	KASSERT(pp->error != 0, (__func__));
	g_part_wither(cp->geom, pp->error);
}
1545 
/*
 * Spoiled method: the provider we consume was written to underneath
 * us, so our in-core partition table may be stale.  Wither the geom
 * with ENXIO; re-tasting will rebuild it from the on-disk state.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
1555 
/*
 * Start method: dispatch a bio aimed at one of our partition
 * providers.  Data transfers are bounds-checked against the
 * partition, clamped to it, translated by the partition's byte
 * offset and passed down.  BIO_GETATTR answers a few attributes
 * locally (firmware geometry, nesting info, kernel-dump setup) and
 * forwards everything else; unsupported commands get EOPNOTSUPP.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* No entry means the partition is (being) deleted. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			/* Starts beyond the end of the partition. */
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clamp the transfer to the partition boundary. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Translate from partition-relative to parent offset. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Nothing to do locally; fall through to forwarding. */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		/*
		 * NOTE(review): the absolute offset is delivered through
		 * g_handleattr_int(), so it is truncated to int width --
		 * confirm this is wide enough for the largest supported
		 * media.
		 */
		if (g_handleattr_int(bp, "PART::offset",
		    table->gpt_offset + entry->gpe_start))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			/* Clamp and translate like a data transfer. */
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		/* Unanswered attributes are forwarded downstream. */
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Forward the (untranslated) request to the parent provider. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
1647 
/*
 * Class init method: register the built-in null scheme, which backs
 * tables with no recognized on-disk partitioning.
 */
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_TAIL(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
1654 
/*
 * Class fini method: unregister the built-in null scheme registered
 * by g_part_init().
 */
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
1661 
1662 static void
1663 g_part_unload_event(void *arg, int flag)
1664 {
1665 	struct g_consumer *cp;
1666 	struct g_geom *gp;
1667 	struct g_provider *pp;
1668 	struct g_part_scheme *scheme;
1669 	struct g_part_table *table;
1670 	uintptr_t *xchg;
1671 	int acc, error;
1672 
1673 	if (flag == EV_CANCEL)
1674 		return;
1675 
1676 	xchg = arg;
1677 	error = 0;
1678 	scheme = (void *)(*xchg);
1679 
1680 	g_topology_assert();
1681 
1682 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
1683 		table = gp->softc;
1684 		if (table->gpt_scheme != scheme)
1685 			continue;
1686 
1687 		acc = 0;
1688 		LIST_FOREACH(pp, &gp->provider, provider)
1689 			acc += pp->acr + pp->acw + pp->ace;
1690 		LIST_FOREACH(cp, &gp->consumer, consumer)
1691 			acc += cp->acr + cp->acw + cp->ace;
1692 
1693 		if (!acc)
1694 			g_part_wither(gp, ENOSYS);
1695 		else
1696 			error = EBUSY;
1697 	}
1698 
1699 	if (!error)
1700 		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
1701 
1702 	*xchg = error;
1703 }
1704 
1705 int
1706 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
1707 {
1708 	uintptr_t arg;
1709 	int error;
1710 
1711 	switch (type) {
1712 	case MOD_LOAD:
1713 		TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);
1714 
1715 		error = g_retaste(&g_part_class);
1716 		if (error)
1717 			TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
1718 		break;
1719 	case MOD_UNLOAD:
1720 		arg = (uintptr_t)scheme;
1721 		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
1722 		    NULL);
1723 		if (!error)
1724 			error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;
1725 		break;
1726 	default:
1727 		error = EOPNOTSUPP;
1728 		break;
1729 	}
1730 
1731 	return (error);
1732 }
1733