xref: /freebsd/sys/geom/part/g_part.c (revision c2bce4a2fcf3083607e00a1734b47c249751c8a8)
1 /*-
2  * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/geom_int.h>
47 #include <geom/part/g_part.h>
48 
49 #include "g_part_if.h"
50 
51 #ifndef _PATH_DEV
52 #define _PATH_DEV "/dev/"
53 #endif
54 
/*
 * The "null" scheme: a placeholder with no methods.  It is installed
 * on a geom when its real table is destroyed (see g_part_ctl_destroy)
 * so that the metadata sectors can still be scrubbed on commit.
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

/*
 * List of registered partitioning schemes.  The null scheme is
 * explicitly skipped by name lookups and probing.
 */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
67 
/*
 * Scheme-independent partition type aliases and their lexemes.
 * Searched linearly by g_part_alias_name().
 */
struct g_part_alias_list {
	const char *lexeme;	/* human-readable alias name */
	enum g_part_alias alias;	/* corresponding alias constant */
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
};
106 
/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

/* Method table connecting the PART class to the GEOM framework. */
static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
140 
141 /*
142  * Support functions.
143  */
144 
145 static void g_part_wither(struct g_geom *, int);
146 
147 const char *
148 g_part_alias_name(enum g_part_alias alias)
149 {
150 	int i;
151 
152 	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
153 		if (g_part_alias_list[i].alias != alias)
154 			continue;
155 		return (g_part_alias_list[i].lexeme);
156 	}
157 
158 	return (NULL);
159 }
160 
/*
 * Given a media size in sectors and a sectors-per-track count, find
 * the head count from the classic CHS candidates that covers the most
 * sectors while keeping the cylinder count within 10 bits (<= 1023).
 * On return *bestchs holds the covered sector count (0 when no
 * candidate fits) and *bestheads the chosen head count.
 */
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	/* Head counts used by traditional CHS geometries; 0 terminates. */
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t cyls, covered;
	u_int hd;
	int i;

	*bestchs = 0;
	*bestheads = 0;
	i = 0;
	while ((hd = candidate_heads[i++]) != 0) {
		cyls = blocks / hd / sectors;
		/* Heads and sectors must not outnumber cylinders. */
		if (cyls < hd || cyls < sectors)
			break;
		/* Cylinder numbers are limited to 10 bits in CHS. */
		if (cyls > 1023)
			continue;
		covered = cyls * hd * sectors;
		if (covered > *bestchs ||
		    (covered == *bestchs && *bestheads == 1)) {
			*bestchs = covered;
			*bestheads = hd;
		}
	}
}
186 
/*
 * Establish a CHS geometry for the table.  Firmware-reported values
 * ("GEOM::fwheads"/"GEOM::fwsectors") are used verbatim when present;
 * otherwise a geometry is synthesized from a table of candidate
 * sectors-per-track values via g_part_geometry_heads().
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		/* No usable firmware geometry; synthesize one. */
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		/* Trust the firmware-provided geometry. */
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
233 
/*
 * Return the table entry with the given index, allocating a zeroed
 * one and linking it in index order when no such entry exists yet.
 * The entry's start/end are (re)set from the arguments either way;
 * an existing entry additionally has its offset reset to 0.
 */
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	/* The entry list is kept sorted by index; stop at or past it. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		/* Not found: allocate and insert after 'last'. */
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
264 
/*
 * Create (if needed) and publish the GEOM provider for a partition
 * entry.  gpe_offset is the byte offset of the partition's first
 * usable byte: it is forced to be at least gpe_start * sectorsize,
 * and any excess over that is subtracted from the advertised media
 * size and added to the stripe offset.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/* Let the scheme compose the provider name. */
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	g_error_provider(entry->gpe_pp, 0);
}
301 
302 static struct g_geom*
303 g_part_find_geom(const char *name)
304 {
305 	struct g_geom *gp;
306 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
307 		if (!strcmp(name, gp->name))
308 			break;
309 	}
310 	return (gp);
311 }
312 
313 static int
314 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
315 {
316 	struct g_geom *gp;
317 	const char *gname;
318 
319 	gname = gctl_get_asciiparam(req, name);
320 	if (gname == NULL)
321 		return (ENOATTR);
322 	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
323 		gname += sizeof(_PATH_DEV) - 1;
324 	gp = g_part_find_geom(gname);
325 	if (gp == NULL) {
326 		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
327 		return (EINVAL);
328 	}
329 	*v = gp;
330 	return (0);
331 }
332 
333 static int
334 g_part_parm_provider(struct gctl_req *req, const char *name,
335     struct g_provider **v)
336 {
337 	struct g_provider *pp;
338 	const char *pname;
339 
340 	pname = gctl_get_asciiparam(req, name);
341 	if (pname == NULL)
342 		return (ENOATTR);
343 	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
344 		pname += sizeof(_PATH_DEV) - 1;
345 	pp = g_provider_by_name(pname);
346 	if (pp == NULL) {
347 		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
348 		return (EINVAL);
349 	}
350 	*v = pp;
351 	return (0);
352 }
353 
354 static int
355 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
356 {
357 	const char *p;
358 	char *x;
359 	quad_t q;
360 
361 	p = gctl_get_asciiparam(req, name);
362 	if (p == NULL)
363 		return (ENOATTR);
364 	q = strtoq(p, &x, 0);
365 	if (*x != '\0' || q < 0) {
366 		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
367 		return (EINVAL);
368 	}
369 	*v = q;
370 	return (0);
371 }
372 
373 static int
374 g_part_parm_scheme(struct gctl_req *req, const char *name,
375     struct g_part_scheme **v)
376 {
377 	struct g_part_scheme *s;
378 	const char *p;
379 
380 	p = gctl_get_asciiparam(req, name);
381 	if (p == NULL)
382 		return (ENOATTR);
383 	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
384 		if (s == &g_part_null_scheme)
385 			continue;
386 		if (!strcasecmp(s->name, p))
387 			break;
388 	}
389 	if (s == NULL) {
390 		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
391 		return (EINVAL);
392 	}
393 	*v = s;
394 	return (0);
395 }
396 
397 static int
398 g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
399 {
400 	const char *p;
401 
402 	p = gctl_get_asciiparam(req, name);
403 	if (p == NULL)
404 		return (ENOATTR);
405 	/* An empty label is always valid. */
406 	if (strcmp(name, "label") != 0 && p[0] == '\0') {
407 		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
408 		return (EINVAL);
409 	}
410 	*v = p;
411 	return (0);
412 }
413 
414 static int
415 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
416 {
417 	const intmax_t *p;
418 	int size;
419 
420 	p = gctl_get_param(req, name, &size);
421 	if (p == NULL)
422 		return (ENOATTR);
423 	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
424 		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
425 		return (EINVAL);
426 	}
427 	*v = (u_int)*p;
428 	return (0);
429 }
430 
431 static int
432 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
433 {
434 	const uint32_t *p;
435 	int size;
436 
437 	p = gctl_get_param(req, name, &size);
438 	if (p == NULL)
439 		return (ENOATTR);
440 	if (size != sizeof(*p) || *p > INT_MAX) {
441 		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
442 		return (EINVAL);
443 	}
444 	*v = (u_int)*p;
445 	return (0);
446 }
447 
448 static int
449 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
450     unsigned int *s)
451 {
452 	const void *p;
453 	int size;
454 
455 	p = gctl_get_param(req, name, &size);
456 	if (p == NULL)
457 		return (ENOATTR);
458 	*v = p;
459 	*s = size;
460 	return (0);
461 }
462 
/*
 * Find the partitioning scheme that best recognizes the media on
 * consumer 'cp' and install its table as gp->softc.  Probe results:
 * 0 is a perfect match, negative values are partial matches (closer
 * to 0 is better), positive values indicate an error.  Returns 0
 * when some scheme matched, ENXIO otherwise.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Re-probe any table already attached to the geom first. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		/* Instantiate a candidate table and let the scheme probe. */
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Best match so far; replace the previous table. */
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}
504 
505 /*
506  * Control request functions.
507  */
508 
/*
 * Handle the "add" verb: create a new partition entry in the geom's
 * table.  The requested start/size/index are validated against the
 * table bounds and existing entries; a previously deleted entry with
 * the same index is reused when present.  On success a provider is
 * created for the new partition.  Returns 0 or an errno (also
 * reported through the request).
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Bounds-check the requested range against the table. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the entry list: find a free index (when none was
	 * requested), remember a same-index deleted entry for reuse,
	 * and make sure the new range overlaps no live entry.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	/* Reuse the deleted entry if one was found; else allocate. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Keep the entry list sorted by index. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
619 
/*
 * Handle the "bootcode" verb: install boot code through the scheme's
 * bootcode method.  Fails with ENODEV when the scheme doesn't take
 * boot code and with EFBIG when the supplied image exceeds the
 * scheme's limit.
 */
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		/* The scheme doesn't support boot code at all. */
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

 fail:
	gctl_error(req, "%d", error);
	return (error);
}
661 
/*
 * Handle the "commit" verb: write all pending changes to the media.
 * Sectors named in the scrub bitmaps (gpt_smhead: from the start of
 * the media, gpt_smtail: from the end) are zeroed first, then the
 * scheme writes its metadata and deleted entries are dropped from
 * the in-core list.  The topology lock is released around the disk
 * I/O and re-acquired before return.  A commit on a null-scheme
 * table finishes the destruction of the geom.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/* Zero out the sectors flagged for scrubbing. */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			/* Tail bits count sectors back from media end. */
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		/* Committing a destroyed table: tear the geom down. */
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Deleted entries are gone from the media now; drop them. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
744 
/*
 * Handle the "create" verb: put a new, empty partition table of the
 * requested scheme on a provider.  If the provider already carries a
 * null-scheme placeholder geom (left behind by "destroy"), that geom
 * and its consumer are recycled; otherwise a new geom is set up and
 * opened.  The topology lock is dropped while the scheme initializes
 * the table.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* Fresh geom: attach and open the consumer. */
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Recycled geom: inherit state from the null table. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* We created the geom; undo everything. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* We borrowed the geom; restore the null table. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
871 
/*
 * Handle the "delete" verb: remove the partition entry with the
 * given index.  The partition's provider must be closed.  Entries
 * created since the last commit are freed outright; pre-existing
 * ones are only marked deleted so a later commit can remove them
 * from the media.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	/* Find the live entry with the requested index. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			/* The partition is open; refuse to delete it. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		/* Never committed: just drop the entry. */
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		/* On the media: mark for removal at next commit. */
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}
931 
/*
 * Handle the "destroy" verb: remove the partition table from the
 * media.  All partition providers must be closed unless 'force' is
 * set, in which case open-count-zero providers are withered and busy
 * ones still cause EBUSY.  On success the table is replaced by a
 * null-scheme placeholder so a subsequent commit can scrub the
 * metadata sectors.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Install the null placeholder table in place of the old one. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	/* Carry over state needed for the final commit/scrub. */
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1014 
/*
 * Handle the "modify" verb: change scheme-specific attributes of the
 * entry with the given index via the scheme's modify method.  The
 * entry is marked modified unless it was created since the last
 * commit (in which case it will be written in full anyway).
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	/* Find the live entry with the requested index. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1061 
1062 static int
1063 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
1064 {
1065 	gctl_error(req, "%d verb 'move'", ENOSYS);
1066 	return (ENOSYS);
1067 }
1068 
/*
 * Handle the "recover" verb: ask the scheme to repair a table that
 * has its gpt_corrupt flag set.  When the table isn't corrupt this
 * is a no-op that merely reports that recovery isn't needed.
 */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1106 
/*
 * Handle the 'resize' verb: change the size of an existing partition
 * entry.  The requested size is validated against the table's last
 * usable block and against every other live entry before the scheme's
 * resize method is invoked.
 */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index: locate the target; deleted/internal don't count */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size: new end block must stay within the table */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	/*
	 * Reject the resize when the new extent would collide with any
	 * other live entry: either the new end lands inside another
	 * entry, or the grown extent would swallow one whole.
	 */
	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	/*
	 * NOTE(review): 16 (0x10) looks like GEOM's "allow foot shooting"
	 * debug flag; unless it is set, refuse to resize a partition that
	 * is open (any read/write/exclusive count) — confirm the flag
	 * value against geom_int.h.
	 */
	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* A brand-new (uncommitted) entry need not be marked modified. */
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/*
	 * update mediasize of changed provider
	 * NOTE(review): mediasize is updated in place; consumers are not
	 * explicitly notified of the change — confirm that is intended
	 * at this revision.
	 */
	pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
		pp->sectorsize;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1189 
1190 static int
1191 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1192     unsigned int set)
1193 {
1194 	struct g_geom *gp;
1195 	struct g_part_entry *entry;
1196 	struct g_part_table *table;
1197 	struct sbuf *sb;
1198 	int error;
1199 
1200 	gp = gpp->gpp_geom;
1201 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1202 	g_topology_assert();
1203 
1204 	table = gp->softc;
1205 
1206 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1207 		if (entry->gpe_deleted || entry->gpe_internal)
1208 			continue;
1209 		if (entry->gpe_index == gpp->gpp_index)
1210 			break;
1211 	}
1212 	if (entry == NULL) {
1213 		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1214 		return (ENOENT);
1215 	}
1216 
1217 	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1218 	if (error) {
1219 		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1220 		return (error);
1221 	}
1222 
1223 	/* Provide feedback if so requested. */
1224 	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1225 		sb = sbuf_new_auto();
1226 		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1227 		    (set) ? "" : "un");
1228 		G_PART_FULLNAME(table, entry, sb, gp->name);
1229 		sbuf_printf(sb, "\n");
1230 		sbuf_finish(sb);
1231 		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1232 		sbuf_delete(sb);
1233 	}
1234 	return (0);
1235 }
1236 
/*
 * Handle the 'undo' verb: revert all uncommitted changes by dropping
 * created/deleted entries and re-reading the on-disk table.  Requires
 * that the table was opened for modification (gpt_opened).
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	/*
	 * Throw away all in-core state that never reached the disk:
	 * clear modification marks, wither providers of entries that were
	 * only just created, and free every entry flagged deleted.
	 */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/*
	 * If the table was freshly created (or there is no real scheme),
	 * there is nothing on disk to fall back to: re-probe the provider
	 * from scratch instead of re-reading the current table.
	 */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		/* Any remaining non-internal entry means we're in use. */
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/* Probe failed: drop access and dismantle the geom. */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	/* Re-read the table from disk. */
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	/* Publish providers for all regular (non-internal) entries. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	/* Close the modification session opened by g_part_ctlreq(). */
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
1330 
1331 static void
1332 g_part_wither(struct g_geom *gp, int error)
1333 {
1334 	struct g_part_entry *entry;
1335 	struct g_part_table *table;
1336 
1337 	table = gp->softc;
1338 	if (table != NULL) {
1339 		G_PART_DESTROY(table, NULL);
1340 		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1341 			LIST_REMOVE(entry, gpe_entry);
1342 			g_free(entry);
1343 		}
1344 		if (gp->softc != NULL) {
1345 			kobj_delete((kobj_t)gp->softc, M_GEOM);
1346 			gp->softc = NULL;
1347 		}
1348 	}
1349 	g_wither_geom(gp, error);
1350 }
1351 
1352 /*
1353  * Class methods.
1354  */
1355 
/*
 * Class control-request handler: the single entry point for all
 * gpart(8) verbs.  It maps the verb to a control request, parses and
 * validates the request arguments, opens the table for modification
 * when needed, lets the scheme pre-check the parameters, and finally
 * dispatches to the per-verb g_part_ctl_*() handler.  Errors are
 * reported through the request; the function itself returns nothing.
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	/*
	 * Map the verb to a control request and build the masks of
	 * mandatory (mparms) and optional (oparms) parameters.  The
	 * 'modifies' flag marks verbs that change the table and thus
	 * need write access and support automatic commit.
	 */
	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	/*
	 * Walk the request arguments: identify each by name, reject any
	 * not in the mandatory/optional masks, convert its value into
	 * the matching gpp field, and record it in gpp.gpp_parms.
	 */
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			/* "arg0" is the positional geom/provider argument. */
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			/* "class" is consumed by the gctl layer; skip it. */
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		/* Convert the argument's value into the gpp field. */
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			/* Conversion helpers report most errors themselves. */
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	/* All mandatory parameters must have been supplied. */
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		/* A corrupt table may only be destroyed or recovered. */
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	/* Dispatch to the per-verb handler. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit: a 'C' in the flags string. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

 out:
	/* Undo the access gained above if the request failed. */
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}
1692 
/*
 * Class destroy_geom method: unconditionally dismantle the geom.
 * EINVAL is the error posted to outstanding I/O via the wither path.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
1704 
/*
 * Taste method: probe the given provider for a supported partition
 * table and, on success, instantiate a geom with one provider per
 * regular partition entry.  Returns the new geom, or NULL when no
 * table was recognized or the provider is unsuitable.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Hold root mount while probing; drop the topology lock for I/O. */
	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Publish a provider for each regular (non-internal) entry. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1794 
1795 /*
1796  * Geom methods.
1797  */
1798 
1799 static int
1800 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1801 {
1802 	struct g_consumer *cp;
1803 
1804 	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1805 	    dw, de));
1806 
1807 	cp = LIST_FIRST(&pp->geom->consumer);
1808 
1809 	/* We always gain write-exclusive access. */
1810 	return (g_access(cp, dr, dw, dw + de));
1811 }
1812 
/*
 * Dumpconf method: emit configuration for the geom, a provider, a
 * consumer, or (when indent == NULL) the compact one-line provider
 * form used for libdisk compatibility.
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		/* Compact one-line provider form (no XML). */
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		/* Let the scheme append its own per-entry elements. */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		/* Let the scheme append its own per-table elements. */
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
1879 
1880 static void
1881 g_part_orphan(struct g_consumer *cp)
1882 {
1883 	struct g_provider *pp;
1884 	struct g_part_table *table;
1885 
1886 	pp = cp->provider;
1887 	KASSERT(pp != NULL, ("%s", __func__));
1888 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
1889 	g_topology_assert();
1890 
1891 	KASSERT(pp->error != 0, ("%s", __func__));
1892 	table = cp->geom->softc;
1893 	if (table != NULL && table->gpt_opened)
1894 		g_access(cp, -1, -1, -1);
1895 	g_part_wither(cp->geom, pp->error);
1896 }
1897 
/*
 * Spoiled method: the underlying provider was written to, so our
 * cached table may be stale.  Dismantle the geom with ENXIO.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
1907 
/*
 * Start method: route I/O addressed to a partition provider.  Reads,
 * writes and deletes are cloned, offset by the partition's start, and
 * forwarded to the consumer; GETATTR answers partition attributes
 * in-line; flushes and unhandled attributes fall through to a plain
 * clone-and-forward at the bottom.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* No entry means the provider is going away; fail the I/O. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clamp the transfer to the end of the partition. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Translate partition-relative to disk-relative offset. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			/* Clamp and translate the dump extent in place. */
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Plain pass-through for FLUSH and unhandled GETATTRs. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
2002 
/* Class init method: seed the scheme list with the built-in null scheme. */
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2009 
/* Class fini method: remove the null scheme registered in g_part_init(). */
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2016 
2017 static void
2018 g_part_unload_event(void *arg, int flag)
2019 {
2020 	struct g_consumer *cp;
2021 	struct g_geom *gp;
2022 	struct g_provider *pp;
2023 	struct g_part_scheme *scheme;
2024 	struct g_part_table *table;
2025 	uintptr_t *xchg;
2026 	int acc, error;
2027 
2028 	if (flag == EV_CANCEL)
2029 		return;
2030 
2031 	xchg = arg;
2032 	error = 0;
2033 	scheme = (void *)(*xchg);
2034 
2035 	g_topology_assert();
2036 
2037 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
2038 		table = gp->softc;
2039 		if (table->gpt_scheme != scheme)
2040 			continue;
2041 
2042 		acc = 0;
2043 		LIST_FOREACH(pp, &gp->provider, provider)
2044 			acc += pp->acr + pp->acw + pp->ace;
2045 		LIST_FOREACH(cp, &gp->consumer, consumer)
2046 			acc += cp->acr + cp->acw + cp->ace;
2047 
2048 		if (!acc)
2049 			g_part_wither(gp, ENOSYS);
2050 		else
2051 			error = EBUSY;
2052 	}
2053 
2054 	if (!error)
2055 		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2056 
2057 	*xchg = error;
2058 }
2059 
2060 int
2061 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
2062 {
2063 	uintptr_t arg;
2064 	int error;
2065 
2066 	switch (type) {
2067 	case MOD_LOAD:
2068 		TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);
2069 
2070 		error = g_retaste(&g_part_class);
2071 		if (error)
2072 			TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2073 		break;
2074 	case MOD_UNLOAD:
2075 		arg = (uintptr_t)scheme;
2076 		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
2077 		    NULL);
2078 		if (!error)
2079 			error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;
2080 		break;
2081 	default:
2082 		error = EOPNOTSUPP;
2083 		break;
2084 	}
2085 
2086 	return (error);
2087 }
2088