xref: /freebsd/sys/geom/part/g_part.c (revision 30d239bc4c510432e65a84fa1c14ed67a3ab1c92)
1 /*-
2  * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/part/g_part.h>
47 
48 #include "g_part_if.h"
49 
/*
 * Empty method table for the "null" scheme; it implements nothing and
 * serves only as a sentinel for geoms without a usable partition table.
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};
53 
/*
 * The "null" partitioning scheme.  It is installed on a geom whose
 * table has been destroyed (see g_part_ctl_destroy()) and is treated
 * by g_part_probe() as "no scheme found".
 */
static struct g_part_scheme g_part_null_scheme = {
	"n/a",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
G_PART_SCHEME_DECLARE(g_part_null_scheme);
60 
/* Linker set collecting all g_part scheme declarations. */
SET_DECLARE(g_part_scheme_set, struct g_part_scheme);
62 
/*
 * Mapping between partition type alias strings (as passed in gctl
 * requests) and their enum g_part_alias values; consumed by
 * g_part_alias_name().
 */
struct g_part_alias_list {
	const char *lexeme;		/* Alias string, e.g. "freebsd-ufs". */
	enum g_part_alias alias;	/* Corresponding enum value. */
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "mbr", G_PART_ALIAS_MBR }
};
75 
/*
 * The GEOM partitioning class.
 */

/* Forward declarations of the class and geom methods defined below. */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
105 
/* Control requests (verbs) dispatched by g_part_ctlreq(). */
enum g_part_ctl {
	G_PART_CTL_NONE,
	G_PART_CTL_ADD,
	G_PART_CTL_COMMIT,
	G_PART_CTL_CREATE,
	G_PART_CTL_DELETE,
	G_PART_CTL_DESTROY,
	G_PART_CTL_MODIFY,
	G_PART_CTL_MOVE,
	G_PART_CTL_RECOVER,
	G_PART_CTL_RESIZE,
	G_PART_CTL_UNDO
};
119 
/*
 * Support functions.
 */

/* Forward declaration: g_part_wither() is defined after its users. */
static void g_part_wither(struct g_geom *, int);
125 
126 const char *
127 g_part_alias_name(enum g_part_alias alias)
128 {
129 	int i;
130 
131 	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
132 		if (g_part_alias_list[i].alias != alias)
133 			continue;
134 		return (g_part_alias_list[i].lexeme);
135 	}
136 
137 	return (NULL);
138 }
139 
/*
 * Find the CHS geometry with the largest addressable capacity for the
 * given disk size and sectors-per-track value.  Only classic head
 * counts are tried and the cylinder count is capped at 1023.  On
 * return, *bestchs holds the capacity (in sectors) of the best
 * geometry found and *bestheads its head count; both are 0 when no
 * head count yields an acceptable geometry.
 */
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t cap, cyls;
	u_int hds;
	int i;

	*bestchs = 0;
	*bestheads = 0;
	for (i = 0; candidate_heads[i] != 0; i++) {
		hds = candidate_heads[i];
		cyls = blocks / hds / sectors;
		/* Larger head counts can only shrink the cylinder count. */
		if (cyls < hds || cyls < sectors)
			break;
		if (cyls > 1023)
			continue;
		cap = cyls * hds * sectors;
		/*
		 * Prefer the largest capacity; on a tie, prefer any
		 * geometry over a single-headed one.
		 */
		if (cap > *bestchs || (cap == *bestchs && *bestheads == 1)) {
			*bestchs = cap;
			*bestheads = hds;
		}
	}
}
165 
/*
 * Synthesize a CHS geometry for the disk and record it in the table.
 * If the consumer reports a valid firmware geometry (GEOM::fwsectors
 * and GEOM::fwheads within range) that geometry is used verbatim and
 * gpt_fixgeom is set; otherwise the best geometry is computed from the
 * disk size by trying a set of classic sectors-per-track values.
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 ||
	    sectors < 1 || sectors > 63 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 ||
	    heads < 1 || heads > 255) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
214 
/*
 * Find or create the table entry with the given index.  The gpt_entry
 * list is kept sorted by index; when no entry with the index exists, a
 * new zeroed entry (of the scheme's entry size) is allocated and linked
 * in at the proper position.  In both cases the entry's start and end
 * sectors are (re)set to the given values.
 */
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			/* Passed the insertion point: no such entry. */
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	}
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
244 
/*
 * Create or update the GEOM provider for a table entry.  The provider
 * is named after the geom plus the scheme-specific partition name and
 * sized from the entry's start/end sectors.  Stripe parameters are
 * inherited from the underlying provider, adjusted for the partition's
 * byte offset.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	char buf[32];
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	entry->gpe_offset = entry->gpe_start * pp->sectorsize;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	/* Pass through only the parent's CANDELETE capability. */
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	if (pp->stripesize > 0) {
		entry->gpe_pp->stripesize = pp->stripesize;
		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
		    entry->gpe_offset) % pp->stripesize;
	}
	g_error_provider(entry->gpe_pp, 0);
}
275 
276 static int
277 g_part_parm_geom(const char *p, struct g_geom **v)
278 {
279 	struct g_geom *gp;
280 
281 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
282 		if (!strcmp(p, gp->name))
283 			break;
284 	}
285 	if (gp == NULL)
286 		return (EINVAL);
287 	*v = gp;
288 	return (0);
289 }
290 
291 static int
292 g_part_parm_provider(const char *p, struct g_provider **v)
293 {
294 	struct g_provider *pp;
295 
296 	pp = g_provider_by_name(p);
297 	if (pp == NULL)
298 		return (EINVAL);
299 	*v = pp;
300 	return (0);
301 }
302 
303 static int
304 g_part_parm_quad(const char *p, quad_t *v)
305 {
306 	char *x;
307 	quad_t q;
308 
309 	q = strtoq(p, &x, 0);
310 	if (*x != '\0' || q < 0)
311 		return (EINVAL);
312 	*v = q;
313 	return (0);
314 }
315 
316 static int
317 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
318 {
319 	struct g_part_scheme **iter, *s;
320 
321 	s = NULL;
322 	SET_FOREACH(iter, g_part_scheme_set) {
323 		if ((*iter)->name == NULL)
324 			continue;
325 		if (!strcasecmp((*iter)->name, p)) {
326 			s = *iter;
327 			break;
328 		}
329 	}
330 	if (s == NULL)
331 		return (EINVAL);
332 	*v = s;
333 	return (0);
334 }
335 
/*
 * Accept a non-empty string parameter.  Returns 0 with the string
 * stored in *v, or EINVAL for an empty string.
 */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (*p == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}
345 
/*
 * Parse an unsigned integer parameter in the range [0, INT_MAX] (any
 * base accepted by strtol(9)).  Returns 0 with the value stored in *v,
 * or EINVAL on trailing garbage or an out-of-range result.
 */
static int
g_part_parm_uint(const char *p, u_int *v)
{
	char *endp;
	long val;

	val = strtol(p, &endp, 0);
	if (val < 0 || val > INT_MAX || *endp != '\0')
		return (EINVAL);
	*v = (unsigned int)val;
	return (0);
}
358 
/*
 * Probe the consumer for a partition table.  Each registered scheme
 * (except the null scheme) gets a chance to probe; the scheme that
 * returns the highest non-positive priority wins and its freshly
 * created table object is left in gp->softc.  Returns 0 when a scheme
 * was found, ENXIO otherwise.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme **iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Start from the currently attached scheme, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme;
	pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) :
	    INT_MIN;
	if (pri == 0)
		goto done;	/* Current scheme is a perfect match. */
	if (pri > 0) {	/* error */
		scheme = &g_part_null_scheme;
		pri = INT_MIN;
	}

	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter) == &g_part_null_scheme)
			continue;
		/* Instantiate a table object for this scheme and probe. */
		table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = *iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Best candidate so far; replace the previous one. */
			pri = probe;
			scheme = *iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == &g_part_null_scheme) ? ENXIO : 0);
}
401 
402 /*
403  * Control request functions.
404  */
405 
/*
 * Handle the "add" verb: create a new partition entry in the table.
 * The requested start/size must fall within the table's usable range
 * and must not overlap any live (non-deleted) entry.  When no index
 * was requested, the first free index is used; a previously deleted
 * entry with the chosen index is recycled instead of allocating.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Validate the request against the table's usable range. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Scan the entry list (sorted by index): remember a deleted
	 * entry with the wanted index for reuse, advance past occupied
	 * indices when hunting for a free one, and reject overlaps
	 * with live entries.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index) {
			/* Index taken; try the next one. */
			index = entry->gpe_index + 1;
			last = entry;
		}
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		/* Also catch the case where we fully enclose an entry. */
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}

	/* Recycle the deleted entry or allocate a new one. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Keep the list sorted: insert after 'last'. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s added\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
511 
/*
 * Handle the "commit" verb: write all pending changes to disk.  The
 * sectors recorded in the scrub maps (gpt_smhead at the start of the
 * disk, gpt_smtail at the end) are zeroed first, then the scheme's
 * WRITE method stores the table itself.  Finally, deleted entries are
 * reaped and the table is marked clean and closed.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		/* Nothing was modified; committing is not allowed. */
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		/* Zero the sectors flagged at the head of the disk. */
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		/* Likewise for the sectors flagged at the tail. */
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		/* A destroyed table: just take the geom down. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Reap deleted entries and clear created/modified markers. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
588 
/*
 * Handle the "create" verb: put a new, empty partition table of the
 * requested scheme on a provider.  When a "null" g_part geom already
 * exists on the provider (left behind by "destroy"), it is recycled;
 * otherwise a new geom and consumer are created and opened.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	/* Validate the optional entry count against the scheme's limits. */
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Recycle the null geom's consumer and open/scrub state. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	/* Undo: drop the new geom, or restore the null table. */
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
715 
/*
 * Handle the "delete" verb: mark a partition entry as deleted (or free
 * it outright when it was never committed) and wither its provider.
 * Fails with EBUSY while the partition is open.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* Refuse to delete a partition that is open. */
	pp = entry->gpe_pp;
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	pp->private = NULL;
	entry->gpe_pp = NULL;
	if (entry->gpe_created) {
		/* Never committed: no on-disk state to undo; free now. */
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s deleted\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
771 
/*
 * Handle the "destroy" verb: remove the partition table itself.  All
 * entries must already be deleted.  On success the scheme-specific
 * table is replaced by a "null" table (preserving the open and scrub
 * state) so that a subsequent commit or undo can finish the job.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Any live entry makes the table busy. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Install the null scheme, carrying over open/scrub state. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
826 
/*
 * Handle the "modify" verb: change attributes (type and/or label) of
 * an existing entry via the scheme's MODIFY method.
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* A created entry is written in full anyway; don't double-mark. */
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s modified\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
874 
/* Handle the "move" verb: not implemented yet. */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
881 
/* Handle the "recover" verb: not implemented yet. */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'recover'", ENOSYS);
	return (ENOSYS);
}
888 
/* Handle the "resize" verb: not implemented yet. */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'resize'", ENOSYS);
	return (ENOSYS);
}
895 
/*
 * Handle the "undo" verb: discard all uncommitted changes.  Created
 * entries are withered and freed, deleted entries are freed, and the
 * on-disk table is re-read (re-probing first when the in-core table
 * was newly created or destroyed) to restore the committed state.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		/* Nothing was modified; there is nothing to undo. */
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Uncommitted addition: wither its provider. */
			pp = entry->gpe_pp;
			pp->private = NULL;
			entry->gpe_pp = NULL;
			g_wither_provider(pp, ENXIO);
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/* NOTE: probe failure withers the geom but reports
			 * success to the caller. */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	/* Re-create providers for the entries read back from disk. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
970 
971 static void
972 g_part_wither(struct g_geom *gp, int error)
973 {
974 	struct g_part_entry *entry;
975 	struct g_part_table *table;
976 
977 	table = gp->softc;
978 	if (table != NULL) {
979 		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
980 			LIST_REMOVE(entry, gpe_entry);
981 			g_free(entry);
982 		}
983 		if (gp->softc != NULL) {
984 			kobj_delete((kobj_t)gp->softc, M_GEOM);
985 			gp->softc = NULL;
986 		}
987 	}
988 	g_wither_geom(gp, error);
989 }
990 
991 /*
992  * Class methods.
993  */
994 
995 static void
996 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
997 {
998 	struct g_part_parms gpp;
999 	struct g_part_table *table;
1000 	struct gctl_req_arg *ap;
1001 	const char *p;
1002 	enum g_part_ctl ctlreq;
1003 	unsigned int i, mparms, oparms, parm;
1004 	int auto_commit, close_on_error;
1005 	int error, modifies;
1006 
1007 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1008 	g_topology_assert();
1009 
1010 	ctlreq = G_PART_CTL_NONE;
1011 	modifies = 1;
1012 	mparms = 0;
1013 	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1014 	switch (*verb) {
1015 	case 'a':
1016 		if (!strcmp(verb, "add")) {
1017 			ctlreq = G_PART_CTL_ADD;
1018 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1019 			    G_PART_PARM_START | G_PART_PARM_TYPE;
1020 			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1021 		}
1022 		break;
1023 	case 'c':
1024 		if (!strcmp(verb, "commit")) {
1025 			ctlreq = G_PART_CTL_COMMIT;
1026 			mparms |= G_PART_PARM_GEOM;
1027 			modifies = 0;
1028 		} else if (!strcmp(verb, "create")) {
1029 			ctlreq = G_PART_CTL_CREATE;
1030 			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1031 			oparms |= G_PART_PARM_ENTRIES;
1032 		}
1033 		break;
1034 	case 'd':
1035 		if (!strcmp(verb, "delete")) {
1036 			ctlreq = G_PART_CTL_DELETE;
1037 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1038 		} else if (!strcmp(verb, "destroy")) {
1039 			ctlreq = G_PART_CTL_DESTROY;
1040 			mparms |= G_PART_PARM_GEOM;
1041 		}
1042 		break;
1043 	case 'm':
1044 		if (!strcmp(verb, "modify")) {
1045 			ctlreq = G_PART_CTL_MODIFY;
1046 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1047 			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1048 		} else if (!strcmp(verb, "move")) {
1049 			ctlreq = G_PART_CTL_MOVE;
1050 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1051 		}
1052 		break;
1053 	case 'r':
1054 		if (!strcmp(verb, "recover")) {
1055 			ctlreq = G_PART_CTL_RECOVER;
1056 			mparms |= G_PART_PARM_GEOM;
1057 		} else if (!strcmp(verb, "resize")) {
1058 			ctlreq = G_PART_CTL_RESIZE;
1059 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1060 		}
1061 		break;
1062 	case 'u':
1063 		if (!strcmp(verb, "undo")) {
1064 			ctlreq = G_PART_CTL_UNDO;
1065 			mparms |= G_PART_PARM_GEOM;
1066 			modifies = 0;
1067 		}
1068 		break;
1069 	}
1070 	if (ctlreq == G_PART_CTL_NONE) {
1071 		gctl_error(req, "%d verb '%s'", EINVAL, verb);
1072 		return;
1073 	}
1074 
1075 	bzero(&gpp, sizeof(gpp));
1076 	for (i = 0; i < req->narg; i++) {
1077 		ap = &req->arg[i];
1078 		parm = 0;
1079 		switch (ap->name[0]) {
1080 		case 'c':
1081 			if (!strcmp(ap->name, "class"))
1082 				continue;
1083 			break;
1084 		case 'e':
1085 			if (!strcmp(ap->name, "entries"))
1086 				parm = G_PART_PARM_ENTRIES;
1087 			break;
1088 		case 'f':
1089 			if (!strcmp(ap->name, "flags"))
1090 				parm = G_PART_PARM_FLAGS;
1091 			break;
1092 		case 'g':
1093 			if (!strcmp(ap->name, "geom"))
1094 				parm = G_PART_PARM_GEOM;
1095 			break;
1096 		case 'i':
1097 			if (!strcmp(ap->name, "index"))
1098 				parm = G_PART_PARM_INDEX;
1099 			break;
1100 		case 'l':
1101 			if (!strcmp(ap->name, "label"))
1102 				parm = G_PART_PARM_LABEL;
1103 			break;
1104 		case 'o':
1105 			if (!strcmp(ap->name, "output"))
1106 				parm = G_PART_PARM_OUTPUT;
1107 			break;
1108 		case 'p':
1109 			if (!strcmp(ap->name, "provider"))
1110 				parm = G_PART_PARM_PROVIDER;
1111 			break;
1112 		case 's':
1113 			if (!strcmp(ap->name, "scheme"))
1114 				parm = G_PART_PARM_SCHEME;
1115 			else if (!strcmp(ap->name, "size"))
1116 				parm = G_PART_PARM_SIZE;
1117 			else if (!strcmp(ap->name, "start"))
1118 				parm = G_PART_PARM_START;
1119 			break;
1120 		case 't':
1121 			if (!strcmp(ap->name, "type"))
1122 				parm = G_PART_PARM_TYPE;
1123 			break;
1124 		case 'v':
1125 			if (!strcmp(ap->name, "verb"))
1126 				continue;
1127 			else if (!strcmp(ap->name, "version"))
1128 				parm = G_PART_PARM_VERSION;
1129 			break;
1130 		}
1131 		if ((parm & (mparms | oparms)) == 0) {
1132 			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1133 			return;
1134 		}
1135 		p = gctl_get_asciiparam(req, ap->name);
1136 		if (p == NULL) {
1137 			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
1138 			return;
1139 		}
1140 		switch (parm) {
1141 		case G_PART_PARM_ENTRIES:
1142 			error = g_part_parm_uint(p, &gpp.gpp_entries);
1143 			break;
1144 		case G_PART_PARM_FLAGS:
1145 			if (p[0] == '\0')
1146 				continue;
1147 			error = g_part_parm_str(p, &gpp.gpp_flags);
1148 			break;
1149 		case G_PART_PARM_GEOM:
1150 			error = g_part_parm_geom(p, &gpp.gpp_geom);
1151 			break;
1152 		case G_PART_PARM_INDEX:
1153 			error = g_part_parm_uint(p, &gpp.gpp_index);
1154 			break;
1155 		case G_PART_PARM_LABEL:
1156 			/* An empty label is always valid. */
1157 			gpp.gpp_label = p;
1158 			error = 0;
1159 			break;
1160 		case G_PART_PARM_OUTPUT:
1161 			error = 0;	/* Write-only parameter */
1162 			break;
1163 		case G_PART_PARM_PROVIDER:
1164 			error = g_part_parm_provider(p, &gpp.gpp_provider);
1165 			break;
1166 		case G_PART_PARM_SCHEME:
1167 			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
1168 			break;
1169 		case G_PART_PARM_SIZE:
1170 			error = g_part_parm_quad(p, &gpp.gpp_size);
1171 			break;
1172 		case G_PART_PARM_START:
1173 			error = g_part_parm_quad(p, &gpp.gpp_start);
1174 			break;
1175 		case G_PART_PARM_TYPE:
1176 			error = g_part_parm_str(p, &gpp.gpp_type);
1177 			break;
1178 		case G_PART_PARM_VERSION:
1179 			error = g_part_parm_uint(p, &gpp.gpp_version);
1180 			break;
1181 		default:
1182 			error = EDOOFUS;
1183 			break;
1184 		}
1185 		if (error) {
1186 			gctl_error(req, "%d %s '%s'", error, ap->name, p);
1187 			return;
1188 		}
1189 		gpp.gpp_parms |= parm;
1190 	}
1191 	if ((gpp.gpp_parms & mparms) != mparms) {
1192 		parm = mparms - (gpp.gpp_parms & mparms);
1193 		gctl_error(req, "%d param '%x'", ENOATTR, parm);
1194 		return;
1195 	}
1196 
1197 	/* Obtain permissions if possible/necessary. */
1198 	close_on_error = 0;
1199 	table = NULL;	/* Suppress uninit. warning. */
1200 	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1201 		table = gpp.gpp_geom->softc;
1202 		if (table != NULL && !table->gpt_opened) {
1203 			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1204 			    1, 1, 1);
1205 			if (error) {
1206 				gctl_error(req, "%d geom '%s'", error,
1207 				    gpp.gpp_geom->name);
1208 				return;
1209 			}
1210 			table->gpt_opened = 1;
1211 			close_on_error = 1;
1212 		}
1213 	}
1214 
1215 	error = EDOOFUS;	/* Prevent bogus  uninit. warning. */
1216 	switch (ctlreq) {
1217 	case G_PART_CTL_NONE:
1218 		panic("%s", __func__);
1219 	case G_PART_CTL_ADD:
1220 		error = g_part_ctl_add(req, &gpp);
1221 		break;
1222 	case G_PART_CTL_COMMIT:
1223 		error = g_part_ctl_commit(req, &gpp);
1224 		break;
1225 	case G_PART_CTL_CREATE:
1226 		error = g_part_ctl_create(req, &gpp);
1227 		break;
1228 	case G_PART_CTL_DELETE:
1229 		error = g_part_ctl_delete(req, &gpp);
1230 		break;
1231 	case G_PART_CTL_DESTROY:
1232 		error = g_part_ctl_destroy(req, &gpp);
1233 		break;
1234 	case G_PART_CTL_MODIFY:
1235 		error = g_part_ctl_modify(req, &gpp);
1236 		break;
1237 	case G_PART_CTL_MOVE:
1238 		error = g_part_ctl_move(req, &gpp);
1239 		break;
1240 	case G_PART_CTL_RECOVER:
1241 		error = g_part_ctl_recover(req, &gpp);
1242 		break;
1243 	case G_PART_CTL_RESIZE:
1244 		error = g_part_ctl_resize(req, &gpp);
1245 		break;
1246 	case G_PART_CTL_UNDO:
1247 		error = g_part_ctl_undo(req, &gpp);
1248 		break;
1249 	}
1250 
1251 	/* Implement automatic commit. */
1252 	if (!error) {
1253 		auto_commit = (modifies &&
1254 		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1255 		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1256 		if (auto_commit) {
1257 			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
1258 			error = g_part_ctl_commit(req, &gpp);
1259 		}
1260 	}
1261 
1262 	if (error && close_on_error) {
1263 		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1264 		table->gpt_opened = 0;
1265 	}
1266 }
1267 
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	/*
	 * Class-level destroy: unconditionally tear the geom down.
	 * Outstanding I/O on its providers will complete with EINVAL.
	 */
	g_part_wither(gp, EINVAL);
	return (0);
}
1279 
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Drop the topology lock; probing and reading the table do I/O. */
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		/* The partition below us cannot be subdivided further. */
		error = ENODEV;
		goto fail;
	}
	/* Absence of PART::depth means we are the outermost table. */
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	/* Find a partitioning scheme that recognizes this media. */
	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	/* Have the selected scheme parse the on-disk table. */
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	/* Re-acquire the topology lock to publish the partitions. */
	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	/* Release the read access obtained above; tasting is done. */
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	/* Common error exit: relock, drop access and dismantle the geom. */
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1359 
1360 /*
1361  * Geom methods.
1362  */
1363 
1364 static int
1365 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1366 {
1367 	struct g_consumer *cp;
1368 
1369 	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1370 	    dw, de));
1371 
1372 	cp = LIST_FIRST(&pp->geom->consumer);
1373 
1374 	/* We always gain write-exclusive access. */
1375 	return (g_access(cp, dr, dw, dw + de));
1376 }
1377 
1378 static void
1379 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1380     struct g_consumer *cp, struct g_provider *pp)
1381 {
1382 	char buf[64];
1383 	struct g_part_entry *entry;
1384 	struct g_part_table *table;
1385 
1386 	KASSERT(sb != NULL && gp != NULL, (__func__));
1387 	table = gp->softc;
1388 
1389 	if (indent == NULL) {
1390 		KASSERT(cp == NULL && pp != NULL, (__func__));
1391 		entry = pp->private;
1392 		if (entry == NULL)
1393 			return;
1394 		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
1395 		    (uintmax_t)entry->gpe_offset,
1396 		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
1397 	} else if (cp != NULL) {	/* Consumer configuration. */
1398 		KASSERT(pp == NULL, (__func__));
1399 		/* none */
1400 	} else if (pp != NULL) {	/* Provider configuration. */
1401 		entry = pp->private;
1402 		if (entry == NULL)
1403 			return;
1404 		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
1405 		    entry->gpe_index);
1406 		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1407 		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
1408 		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
1409 		    (uintmax_t)entry->gpe_offset);
1410 		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
1411 		    (uintmax_t)pp->mediasize);
1412 		G_PART_DUMPCONF(table, entry, sb, indent);
1413 	} else {			/* Geom configuration. */
1414 		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
1415 		    table->gpt_scheme->name);
1416 		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
1417 		    table->gpt_entries);
1418 		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
1419 		    (uintmax_t)table->gpt_first);
1420 		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
1421 		    (uintmax_t)table->gpt_last);
1422 		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
1423 		    table->gpt_sectors);
1424 		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
1425 		    table->gpt_heads);
1426 		G_PART_DUMPCONF(table, NULL, sb, indent);
1427 	}
1428 }
1429 
1430 static void
1431 g_part_orphan(struct g_consumer *cp)
1432 {
1433 	struct g_provider *pp;
1434 
1435 	pp = cp->provider;
1436 	KASSERT(pp != NULL, (__func__));
1437 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
1438 	g_topology_assert();
1439 
1440 	KASSERT(pp->error != 0, (__func__));
1441 	g_part_wither(cp->geom, pp->error);
1442 }
1443 
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	/*
	 * The underlying media was opened for writing behind our back;
	 * our cached table can no longer be trusted, so tear down the
	 * geom. Re-tasting will rebuild it from the (new) on-disk data.
	 */
	g_part_wither(cp->geom, ENXIO);
}
1453 
/*
 * I/O entry point for partition providers. Translates partition-
 * relative requests into media-relative requests by adding the
 * partition's starting offset (gpe_offset) and clipping to the
 * partition boundary.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/*
	 * No table entry behind the provider (presumably it is being
	 * withered): reject all I/O.
	 */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		/* Requests entirely beyond the partition fail outright. */
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip a request that straddles the partition's end. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Rebase from partition-relative to media-relative. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Forwarded unchanged via the common path below. */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			/* Translate the dump region like regular I/O. */
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		/* Unhandled attributes fall through to the consumer. */
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Common path: forward the (FLUSH/GETATTR) request downward. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
1542