xref: /freebsd/sys/geom/part/g_part.c (revision b28624fde638caadd4a89f50c9b7e7da0f98c4d2)
1 /*-
2  * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/part/g_part.h>
47 
48 #include "g_part_if.h"
49 
/* Empty method table for the null scheme below. */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};
53 
/*
 * The "null" scheme is installed on a geom after its table has been
 * destroyed (see g_part_ctl_destroy) so the geom stays around until
 * the destruction is committed or undone; it has no methods.
 */
static struct g_part_scheme g_part_null_scheme = {
	"n/a",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
G_PART_SCHEME_DECLARE(g_part_null_scheme);
60 
61 SET_DECLARE(g_part_scheme_set, struct g_part_scheme);
62 
/* Maps partition type alias names to their enum values. */
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "mbr", G_PART_ALIAS_MBR }
};
74 
75 /*
76  * The GEOM partitioning class.
77  */
78 static g_ctl_req_t g_part_ctlreq;
79 static g_ctl_destroy_geom_t g_part_destroy_geom;
80 static g_taste_t g_part_taste;
81 
82 static g_access_t g_part_access;
83 static g_dumpconf_t g_part_dumpconf;
84 static g_orphan_t g_part_orphan;
85 static g_spoiled_t g_part_spoiled;
86 static g_start_t g_part_start;
87 
static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

/* Register the class with the GEOM framework. */
DECLARE_GEOM_CLASS(g_part_class, g_part);
104 
/* Verbs accepted by the control request handler (g_part_ctlreq). */
enum g_part_ctl {
	G_PART_CTL_NONE,
	G_PART_CTL_ADD,
	G_PART_CTL_COMMIT,
	G_PART_CTL_CREATE,
	G_PART_CTL_DELETE,
	G_PART_CTL_DESTROY,
	G_PART_CTL_MODIFY,
	G_PART_CTL_MOVE,
	G_PART_CTL_RECOVER,
	G_PART_CTL_RESIZE,
	G_PART_CTL_UNDO
};
118 
119 /*
120  * Support functions.
121  */
122 
123 static void g_part_wither(struct g_geom *, int);
124 
125 const char *
126 g_part_alias_name(enum g_part_alias alias)
127 {
128 	int i;
129 
130 	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
131 		if (g_part_alias_list[i].alias != alias)
132 			continue;
133 		return (g_part_alias_list[i].lexeme);
134 	}
135 
136 	return (NULL);
137 }
138 
139 void
140 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
141     u_int *bestheads)
142 {
143 	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
144 	off_t chs, cylinders;
145 	u_int heads;
146 	int idx;
147 
148 	*bestchs = 0;
149 	*bestheads = 0;
150 	for (idx = 0; candidate_heads[idx] != 0; idx++) {
151 		heads = candidate_heads[idx];
152 		cylinders = blocks / heads / sectors;
153 		if (cylinders < heads || cylinders < sectors)
154 			break;
155 		if (cylinders > 1023)
156 			continue;
157 		chs = cylinders * heads * sectors;
158 		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
159 			*bestchs = chs;
160 			*bestheads = heads;
161 		}
162 	}
163 }
164 
165 static void
166 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
167     off_t blocks)
168 {
169 	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
170 	off_t chs, bestchs;
171 	u_int heads, sectors;
172 	int idx;
173 
174 	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 ||
175 	    sectors < 1 || sectors > 63 ||
176 	    g_getattr("GEOM::fwheads", cp, &heads) != 0 ||
177 	    heads < 1 || heads > 255) {
178 		table->gpt_fixgeom = 0;
179 		table->gpt_heads = 0;
180 		table->gpt_sectors = 0;
181 		bestchs = 0;
182 		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
183 			sectors = candidate_sectors[idx];
184 			g_part_geometry_heads(blocks, sectors, &chs, &heads);
185 			if (chs == 0)
186 				continue;
187 			/*
188 			 * Prefer a geometry with sectors > 1, but only if
189 			 * it doesn't bump down the numbver of heads to 1.
190 			 */
191 			if (chs > bestchs || (chs == bestchs && heads > 1 &&
192 			    table->gpt_sectors == 1)) {
193 				bestchs = chs;
194 				table->gpt_heads = heads;
195 				table->gpt_sectors = sectors;
196 			}
197 		}
198 		/*
199 		 * If we didn't find a geometry at all, then the disk is
200 		 * too big. This means we can use the maximum number of
201 		 * heads and sectors.
202 		 */
203 		if (bestchs == 0) {
204 			table->gpt_heads = 255;
205 			table->gpt_sectors = 63;
206 		}
207 	} else {
208 		table->gpt_fixgeom = 1;
209 		table->gpt_heads = heads;
210 		table->gpt_sectors = sectors;
211 	}
212 }
213 
214 struct g_part_entry *
215 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
216     quad_t end)
217 {
218 	struct g_part_entry *entry, *last;
219 
220 	last = NULL;
221 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
222 		if (entry->gpe_index == index)
223 			break;
224 		if (entry->gpe_index > index) {
225 			entry = NULL;
226 			break;
227 		}
228 		last = entry;
229 	}
230 	if (entry == NULL) {
231 		entry = g_malloc(table->gpt_scheme->gps_entrysz,
232 		    M_WAITOK | M_ZERO);
233 		entry->gpe_index = index;
234 		if (last == NULL)
235 			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
236 		else
237 			LIST_INSERT_AFTER(last, entry, gpe_entry);
238 	}
239 	entry->gpe_start = start;
240 	entry->gpe_end = end;
241 	return (entry);
242 }
243 
/*
 * Create (or refresh) the GEOM provider that represents a partition
 * entry.  The provider name is the geom name with the scheme's
 * per-entry suffix appended; its size is derived from the entry's
 * start/end sectors.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	char buf[32];
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Byte offset of the partition on the underlying provider. */
	entry->gpe_offset = entry->gpe_start * pp->sectorsize;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	if (pp->stripesize > 0) {
		/* Propagate stripe geometry, rebased to the partition. */
		entry->gpe_pp->stripesize = pp->stripesize;
		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
		    entry->gpe_offset) % pp->stripesize;
	}
	g_error_provider(entry->gpe_pp, 0);
}
274 
275 static int
276 g_part_parm_geom(const char *p, struct g_geom **v)
277 {
278 	struct g_geom *gp;
279 
280 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
281 		if (!strcmp(p, gp->name))
282 			break;
283 	}
284 	if (gp == NULL)
285 		return (EINVAL);
286 	*v = gp;
287 	return (0);
288 }
289 
290 static int
291 g_part_parm_provider(const char *p, struct g_provider **v)
292 {
293 	struct g_provider *pp;
294 
295 	pp = g_provider_by_name(p);
296 	if (pp == NULL)
297 		return (EINVAL);
298 	*v = pp;
299 	return (0);
300 }
301 
302 static int
303 g_part_parm_quad(const char *p, quad_t *v)
304 {
305 	char *x;
306 	quad_t q;
307 
308 	q = strtoq(p, &x, 0);
309 	if (*x != '\0' || q < 0)
310 		return (EINVAL);
311 	*v = q;
312 	return (0);
313 }
314 
315 static int
316 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
317 {
318 	struct g_part_scheme **iter, *s;
319 
320 	s = NULL;
321 	SET_FOREACH(iter, g_part_scheme_set) {
322 		if ((*iter)->name == NULL)
323 			continue;
324 		if (!strcasecmp((*iter)->name, p)) {
325 			s = *iter;
326 			break;
327 		}
328 	}
329 	if (s == NULL)
330 		return (EINVAL);
331 	*v = s;
332 	return (0);
333 }
334 
/*
 * Accept any non-empty string; returns EINVAL for the empty string.
 */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (*p == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}
344 
345 static int
346 g_part_parm_uint(const char *p, u_int *v)
347 {
348 	char *x;
349 	long l;
350 
351 	l = strtol(p, &x, 0);
352 	if (*x != '\0' || l < 0 || l > INT_MAX)
353 		return (EINVAL);
354 	*v = (unsigned int)l;
355 	return (0);
356 }
357 
/*
 * Probe the consumer with every compiled-in partitioning scheme and
 * leave the best-matching table in gp->softc.  A probe priority of 0
 * is a perfect match, negative values are weaker matches, positive
 * values signal an error.  Returns ENXIO when no scheme matched.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme **iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Start from the currently attached scheme, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme;
	pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) :
	    INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = &g_part_null_scheme;
		pri = INT_MIN;
	}

	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter) == &g_part_null_scheme)
			continue;
		/* Instantiate a candidate table object for this scheme. */
		table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = *iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Better match: replace the previous candidate. */
			pri = probe;
			scheme = *iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == &g_part_null_scheme) ? ENXIO : 0);
}
400 
401 /*
402  * Control request functions.
403  */
404 
/*
 * Handle the 'add' verb: create a new partition entry in the given
 * start/size range, either by resurrecting an uncommitted deleted
 * entry with the same index or by allocating a fresh one, and
 * instantiate a provider for it.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Sanity-check the requested range against the table limits. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the entries to find a reusable deleted entry with the
	 * wanted index, the insertion point (last), and to check the
	 * requested range for overlap with existing partitions.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index) {
			index = entry->gpe_index + 1;
			last = entry;
		}
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	/* An explicitly requested index must not be taken already. */
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Keep the entry list sorted by index. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		/* Resurrect the deleted entry as a modification. */
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s added\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
510 
511 static int
512 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
513 {
514 	struct g_consumer *cp;
515 	struct g_geom *gp;
516 	struct g_provider *pp;
517 	struct g_part_entry *entry, *tmp;
518 	struct g_part_table *table;
519 	char *buf;
520 	int error, i;
521 
522 	gp = gpp->gpp_geom;
523 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
524 	g_topology_assert();
525 
526 	table = gp->softc;
527 	if (!table->gpt_opened) {
528 		gctl_error(req, "%d", EPERM);
529 		return (EPERM);
530 	}
531 
532 	cp = LIST_FIRST(&gp->consumer);
533 	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
534 		pp = cp->provider;
535 		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
536 		while (table->gpt_smhead != 0) {
537 			i = ffs(table->gpt_smhead) - 1;
538 			error = g_write_data(cp, i * pp->sectorsize, buf,
539 			    pp->sectorsize);
540 			if (error) {
541 				g_free(buf);
542 				goto fail;
543 			}
544 			table->gpt_smhead &= ~(1 << i);
545 		}
546 		while (table->gpt_smtail != 0) {
547 			i = ffs(table->gpt_smtail) - 1;
548 			error = g_write_data(cp, pp->mediasize - (i + 1) *
549 			    pp->sectorsize, buf, pp->sectorsize);
550 			if (error) {
551 				g_free(buf);
552 				goto fail;
553 			}
554 			table->gpt_smtail &= ~(1 << i);
555 		}
556 		g_free(buf);
557 	}
558 
559 	if (table->gpt_scheme == &g_part_null_scheme) {
560 		g_access(cp, -1, -1, -1);
561 		g_part_wither(gp, ENXIO);
562 		return (0);
563 	}
564 
565 	error = G_PART_WRITE(table, cp);
566 	if (error)
567 		goto fail;
568 
569 	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
570 		if (!entry->gpe_deleted) {
571 			entry->gpe_created = 0;
572 			entry->gpe_modified = 0;
573 			continue;
574 		}
575 		LIST_REMOVE(entry, gpe_entry);
576 		g_free(entry);
577 	}
578 	table->gpt_created = 0;
579 	table->gpt_opened = 0;
580 	g_access(cp, -1, -1, -1);
581 	return (0);
582 
583 fail:
584 	gctl_error(req, "%d", error);
585 	return (error);
586 }
587 
/*
 * Handle the 'create' verb: put a new, empty partition table of the
 * given scheme on a provider.  When a geom carrying the null scheme
 * already exists on the provider (left by an uncommitted 'destroy'),
 * that geom is recycled instead of creating a new one.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* Fresh geom: attach to and open the provider. */
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Recycled geom: inherit state from the null table. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* Fresh geom: tear it down completely. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Recycled geom: restore the null table. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
714 
/*
 * Handle the 'delete' verb: remove the partition entry with the
 * given index.  Never-committed entries are freed outright; others
 * are only marked deleted so the change can be committed or undone.
 * Fails with EBUSY while the partition's provider is open.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	/* Find the live (non-deleted) entry with the requested index. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* Refuse while anybody holds the partition open. */
	pp = entry->gpe_pp;
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	pp->private = NULL;
	entry->gpe_pp = NULL;
	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s deleted\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
770 
/*
 * Handle the 'destroy' verb: remove the partition table from the
 * medium.  The scheme-specific table is replaced by a table carrying
 * the null scheme, so the destruction can still be committed or
 * undone.  Fails with EBUSY while any live entry remains.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Replace the table with a null-scheme one, carrying over state. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
825 
826 static int
827 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
828 {
829 	char buf[32];
830 	struct g_geom *gp;
831 	struct g_part_entry *entry;
832 	struct g_part_table *table;
833 	struct sbuf *sb;
834 	int error;
835 
836 	gp = gpp->gpp_geom;
837 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
838 	g_topology_assert();
839 
840 	table = gp->softc;
841 
842 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
843 		if (entry->gpe_deleted)
844 			continue;
845 		if (entry->gpe_index == gpp->gpp_index)
846 			break;
847 	}
848 	if (entry == NULL) {
849 		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
850 		return (ENOENT);
851 	}
852 
853 	error = G_PART_MODIFY(table, entry, gpp);
854 	if (error) {
855 		gctl_error(req, "%d", error);
856 		return (error);
857 	}
858 
859 	if (!entry->gpe_created)
860 		entry->gpe_modified = 1;
861 
862 	/* Provide feedback if so requested. */
863 	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
864 		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
865 		sbuf_printf(sb, "%s%s modified\n", gp->name,
866 		    G_PART_NAME(table, entry, buf, sizeof(buf)));
867 		sbuf_finish(sb);
868 		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
869 		sbuf_delete(sb);
870 	}
871 	return (0);
872 }
873 
874 static int
875 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
876 {
877 	gctl_error(req, "%d verb 'move'", ENOSYS);
878 	return (ENOSYS);
879 }
880 
881 static int
882 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
883 {
884 	gctl_error(req, "%d verb 'recover'", ENOSYS);
885 	return (ENOSYS);
886 }
887 
888 static int
889 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
890 {
891 	gctl_error(req, "%d verb 'resize'", ENOSYS);
892 	return (ENOSYS);
893 }
894 
/*
 * Handle the 'undo' verb: discard all uncommitted changes by
 * dropping created and deleted entries and re-reading the metadata
 * from the medium (re-probing first if the table was created or
 * destroyed in this session).  Drops the exclusive access obtained
 * when the table was opened.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Created this session: wither its provider too. */
			pp = entry->gpe_pp;
			pp->private = NULL;
			entry->gpe_pp = NULL;
			g_wither_provider(pp, ENXIO);
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/* A created or destroyed table has no on-disk state to re-read. */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/* Nothing matched: dismantle the geom. */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
969 
970 static void
971 g_part_wither(struct g_geom *gp, int error)
972 {
973 	struct g_part_entry *entry;
974 	struct g_part_table *table;
975 
976 	table = gp->softc;
977 	if (table != NULL) {
978 		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
979 			LIST_REMOVE(entry, gpe_entry);
980 			g_free(entry);
981 		}
982 		if (gp->softc != NULL) {
983 			kobj_delete((kobj_t)gp->softc, M_GEOM);
984 			gp->softc = NULL;
985 		}
986 	}
987 	g_wither_geom(gp, error);
988 }
989 
990 /*
991  * Class methods.
992  */
993 
994 static void
995 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
996 {
997 	struct g_part_parms gpp;
998 	struct g_part_table *table;
999 	struct gctl_req_arg *ap;
1000 	const char *p;
1001 	enum g_part_ctl ctlreq;
1002 	unsigned int i, mparms, oparms, parm;
1003 	int auto_commit, close_on_error;
1004 	int error, modifies;
1005 
1006 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1007 	g_topology_assert();
1008 
1009 	ctlreq = G_PART_CTL_NONE;
1010 	modifies = 1;
1011 	mparms = 0;
1012 	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1013 	switch (*verb) {
1014 	case 'a':
1015 		if (!strcmp(verb, "add")) {
1016 			ctlreq = G_PART_CTL_ADD;
1017 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1018 			    G_PART_PARM_START | G_PART_PARM_TYPE;
1019 			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1020 		}
1021 		break;
1022 	case 'c':
1023 		if (!strcmp(verb, "commit")) {
1024 			ctlreq = G_PART_CTL_COMMIT;
1025 			mparms |= G_PART_PARM_GEOM;
1026 			modifies = 0;
1027 		} else if (!strcmp(verb, "create")) {
1028 			ctlreq = G_PART_CTL_CREATE;
1029 			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1030 			oparms |= G_PART_PARM_ENTRIES;
1031 		}
1032 		break;
1033 	case 'd':
1034 		if (!strcmp(verb, "delete")) {
1035 			ctlreq = G_PART_CTL_DELETE;
1036 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1037 		} else if (!strcmp(verb, "destroy")) {
1038 			ctlreq = G_PART_CTL_DESTROY;
1039 			mparms |= G_PART_PARM_GEOM;
1040 		}
1041 		break;
1042 	case 'm':
1043 		if (!strcmp(verb, "modify")) {
1044 			ctlreq = G_PART_CTL_MODIFY;
1045 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1046 			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1047 		} else if (!strcmp(verb, "move")) {
1048 			ctlreq = G_PART_CTL_MOVE;
1049 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1050 		}
1051 		break;
1052 	case 'r':
1053 		if (!strcmp(verb, "recover")) {
1054 			ctlreq = G_PART_CTL_RECOVER;
1055 			mparms |= G_PART_PARM_GEOM;
1056 		} else if (!strcmp(verb, "resize")) {
1057 			ctlreq = G_PART_CTL_RESIZE;
1058 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1059 		}
1060 		break;
1061 	case 'u':
1062 		if (!strcmp(verb, "undo")) {
1063 			ctlreq = G_PART_CTL_UNDO;
1064 			mparms |= G_PART_PARM_GEOM;
1065 			modifies = 0;
1066 		}
1067 		break;
1068 	}
1069 	if (ctlreq == G_PART_CTL_NONE) {
1070 		gctl_error(req, "%d verb '%s'", EINVAL, verb);
1071 		return;
1072 	}
1073 
1074 	bzero(&gpp, sizeof(gpp));
1075 	for (i = 0; i < req->narg; i++) {
1076 		ap = &req->arg[i];
1077 		parm = 0;
1078 		switch (ap->name[0]) {
1079 		case 'c':
1080 			if (!strcmp(ap->name, "class"))
1081 				continue;
1082 			break;
1083 		case 'e':
1084 			if (!strcmp(ap->name, "entries"))
1085 				parm = G_PART_PARM_ENTRIES;
1086 			break;
1087 		case 'f':
1088 			if (!strcmp(ap->name, "flags"))
1089 				parm = G_PART_PARM_FLAGS;
1090 			break;
1091 		case 'g':
1092 			if (!strcmp(ap->name, "geom"))
1093 				parm = G_PART_PARM_GEOM;
1094 			break;
1095 		case 'i':
1096 			if (!strcmp(ap->name, "index"))
1097 				parm = G_PART_PARM_INDEX;
1098 			break;
1099 		case 'l':
1100 			if (!strcmp(ap->name, "label"))
1101 				parm = G_PART_PARM_LABEL;
1102 			break;
1103 		case 'o':
1104 			if (!strcmp(ap->name, "output"))
1105 				parm = G_PART_PARM_OUTPUT;
1106 			break;
1107 		case 'p':
1108 			if (!strcmp(ap->name, "provider"))
1109 				parm = G_PART_PARM_PROVIDER;
1110 			break;
1111 		case 's':
1112 			if (!strcmp(ap->name, "scheme"))
1113 				parm = G_PART_PARM_SCHEME;
1114 			else if (!strcmp(ap->name, "size"))
1115 				parm = G_PART_PARM_SIZE;
1116 			else if (!strcmp(ap->name, "start"))
1117 				parm = G_PART_PARM_START;
1118 			break;
1119 		case 't':
1120 			if (!strcmp(ap->name, "type"))
1121 				parm = G_PART_PARM_TYPE;
1122 			break;
1123 		case 'v':
1124 			if (!strcmp(ap->name, "verb"))
1125 				continue;
1126 			else if (!strcmp(ap->name, "version"))
1127 				parm = G_PART_PARM_VERSION;
1128 			break;
1129 		}
1130 		if ((parm & (mparms | oparms)) == 0) {
1131 			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1132 			return;
1133 		}
1134 		p = gctl_get_asciiparam(req, ap->name);
1135 		if (p == NULL) {
1136 			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
1137 			return;
1138 		}
1139 		switch (parm) {
1140 		case G_PART_PARM_ENTRIES:
1141 			error = g_part_parm_uint(p, &gpp.gpp_entries);
1142 			break;
1143 		case G_PART_PARM_FLAGS:
1144 			if (p[0] == '\0')
1145 				continue;
1146 			error = g_part_parm_str(p, &gpp.gpp_flags);
1147 			break;
1148 		case G_PART_PARM_GEOM:
1149 			error = g_part_parm_geom(p, &gpp.gpp_geom);
1150 			break;
1151 		case G_PART_PARM_INDEX:
1152 			error = g_part_parm_uint(p, &gpp.gpp_index);
1153 			break;
1154 		case G_PART_PARM_LABEL:
1155 			/* An empty label is always valid. */
1156 			gpp.gpp_label = p;
1157 			error = 0;
1158 			break;
1159 		case G_PART_PARM_OUTPUT:
1160 			error = 0;	/* Write-only parameter */
1161 			break;
1162 		case G_PART_PARM_PROVIDER:
1163 			error = g_part_parm_provider(p, &gpp.gpp_provider);
1164 			break;
1165 		case G_PART_PARM_SCHEME:
1166 			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
1167 			break;
1168 		case G_PART_PARM_SIZE:
1169 			error = g_part_parm_quad(p, &gpp.gpp_size);
1170 			break;
1171 		case G_PART_PARM_START:
1172 			error = g_part_parm_quad(p, &gpp.gpp_start);
1173 			break;
1174 		case G_PART_PARM_TYPE:
1175 			error = g_part_parm_str(p, &gpp.gpp_type);
1176 			break;
1177 		case G_PART_PARM_VERSION:
1178 			error = g_part_parm_uint(p, &gpp.gpp_version);
1179 			break;
1180 		default:
1181 			error = EDOOFUS;
1182 			break;
1183 		}
1184 		if (error) {
1185 			gctl_error(req, "%d %s '%s'", error, ap->name, p);
1186 			return;
1187 		}
1188 		gpp.gpp_parms |= parm;
1189 	}
1190 	if ((gpp.gpp_parms & mparms) != mparms) {
1191 		parm = mparms - (gpp.gpp_parms & mparms);
1192 		gctl_error(req, "%d param '%x'", ENOATTR, parm);
1193 		return;
1194 	}
1195 
1196 	/* Obtain permissions if possible/necessary. */
1197 	close_on_error = 0;
1198 	table = NULL;	/* Suppress uninit. warning. */
1199 	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1200 		table = gpp.gpp_geom->softc;
1201 		if (table != NULL && !table->gpt_opened) {
1202 			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1203 			    1, 1, 1);
1204 			if (error) {
1205 				gctl_error(req, "%d geom '%s'", error,
1206 				    gpp.gpp_geom->name);
1207 				return;
1208 			}
1209 			table->gpt_opened = 1;
1210 			close_on_error = 1;
1211 		}
1212 	}
1213 
1214 	error = EDOOFUS;	/* Prevent bogus  uninit. warning. */
1215 	switch (ctlreq) {
1216 	case G_PART_CTL_NONE:
1217 		panic("%s", __func__);
1218 	case G_PART_CTL_ADD:
1219 		error = g_part_ctl_add(req, &gpp);
1220 		break;
1221 	case G_PART_CTL_COMMIT:
1222 		error = g_part_ctl_commit(req, &gpp);
1223 		break;
1224 	case G_PART_CTL_CREATE:
1225 		error = g_part_ctl_create(req, &gpp);
1226 		break;
1227 	case G_PART_CTL_DELETE:
1228 		error = g_part_ctl_delete(req, &gpp);
1229 		break;
1230 	case G_PART_CTL_DESTROY:
1231 		error = g_part_ctl_destroy(req, &gpp);
1232 		break;
1233 	case G_PART_CTL_MODIFY:
1234 		error = g_part_ctl_modify(req, &gpp);
1235 		break;
1236 	case G_PART_CTL_MOVE:
1237 		error = g_part_ctl_move(req, &gpp);
1238 		break;
1239 	case G_PART_CTL_RECOVER:
1240 		error = g_part_ctl_recover(req, &gpp);
1241 		break;
1242 	case G_PART_CTL_RESIZE:
1243 		error = g_part_ctl_resize(req, &gpp);
1244 		break;
1245 	case G_PART_CTL_UNDO:
1246 		error = g_part_ctl_undo(req, &gpp);
1247 		break;
1248 	}
1249 
1250 	/* Implement automatic commit. */
1251 	if (!error) {
1252 		auto_commit = (modifies &&
1253 		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1254 		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1255 		if (auto_commit) {
1256 			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
1257 			error = g_part_ctl_commit(req, &gpp);
1258 		}
1259 	}
1260 
1261 	if (error && close_on_error) {
1262 		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1263 		table->gpt_opened = 0;
1264 	}
1265 }
1266 
/*
 * Class method 'destroy_geom': called to tear down a geom of this class
 * (e.g. when the class is being unloaded).  Withers the geom, marking
 * its providers with EINVAL, and always reports success.  The request
 * argument is not used.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
1278 
/*
 * Taste method: check whether the provider carries a partition table
 * that one of the registered schemes understands.  On success a g_part
 * geom is returned with one provider created per partition entry;
 * otherwise NULL is returned and the transient geom is withered.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Probing does I/O; drop the topology lock for its duration. */
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	/* If the provider below us reports a depth, we sit one level deeper. */
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	/* Probe for a supported partitioning scheme. */
	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	/* g_part_probe() attached the table to the geom's softc. */
	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	/* Read the on-disk partition table into the in-core entries. */
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Create a provider for each partition entry that was read. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	/* Release the read access obtained above; tasting is complete. */
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	/* Re-take the topology lock before undoing access and withering. */
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1358 
1359 /*
1360  * Geom methods.
1361  */
1362 
1363 static int
1364 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1365 {
1366 	struct g_consumer *cp;
1367 
1368 	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1369 	    dw, de));
1370 
1371 	cp = LIST_FIRST(&pp->geom->consumer);
1372 
1373 	/* We always gain write-exclusive access. */
1374 	return (g_access(cp, dr, dw, dw + de));
1375 }
1376 
/*
 * Dumpconf method: emit configuration information in one of four
 * contexts, distinguished by the argument combination:
 *  - indent == NULL: terse one-line provider summary (pp must be set);
 *  - cp != NULL: consumer configuration (nothing to report);
 *  - pp != NULL: XML for one partition provider;
 *  - otherwise: XML for the geom/table as a whole.
 * Scheme-specific details are appended via G_PART_DUMPCONF().
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		/* Terse summary: index, offset and type of the partition. */
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		/* Let the scheme add its own per-entry elements. */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		/* Let the scheme add its own per-table elements. */
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
1428 
/*
 * Orphan method: the provider we consume has gone away.  Wither the
 * whole g_part geom, propagating the provider's error to our own
 * providers.  The provider's error must be set by the time we are
 * orphaned.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, (__func__));
	g_part_wither(cp->geom, pp->error);
}
1442 
/*
 * Spoiled method: our consumer was spoiled (the underlying provider
 * was opened for writing by someone else), so the in-core partition
 * table can no longer be trusted.  Discard the geom with ENXIO.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
1452 
/*
 * Start method: dispatch an I/O request arriving on one of our
 * partition providers.  Read, write and delete requests are bounds-
 * checked and clipped against the partition size, shifted by the
 * partition's offset on the underlying media, and forwarded to the
 * consumer below us.  GETATTR requests are either answered here or
 * (for unknown attributes and FLUSH) passed down unmodified.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* No backing partition entry: reject the request. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		/* Starting beyond the end of the partition is an error. */
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the request to the partition boundary... */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* ...and translate it to an absolute media offset. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Pass the flush down unmodified (clone-and-forward below). */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			/* Clip and translate the dump area like I/O above. */
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		/* Unhandled attributes are forwarded to the consumer. */
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Forward FLUSH and unhandled GETATTR requests downward. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
1541