xref: /freebsd/sys/geom/part/g_part.c (revision 35a04710d7286aa9538917fd7f8e417dbee95b82)
1 /*-
2  * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/part/g_part.h>
47 
48 #include "g_part_if.h"
49 
/* Method table for the "null" scheme: no methods at all. */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

/*
 * Placeholder scheme.  It is attached to a geom whose on-disk table
 * has been destroyed but not yet committed, so the geom object stays
 * around until the destroy is committed or undone.
 */
static struct g_part_scheme g_part_null_scheme = {
	"n/a",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
G_PART_SCHEME_DECLARE(g_part_null_scheme);
60 
61 SET_DECLARE(g_part_scheme_set, struct g_part_scheme);
62 
/*
 * Mapping between textual partition type names (lexemes) and the
 * scheme-independent alias enumeration.  One entry per alias; the
 * table is scanned linearly by g_part_alias_name().
 */
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "mbr", G_PART_ALIAS_MBR }
};
76 
77 /*
78  * The GEOM partitioning class.
79  */
/* Class method prototypes. */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_taste_t g_part_taste;

/* Geom method prototypes. */
static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

/* GEOM class definition for the scheme-independent PART framework. */
static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};
104 
105 DECLARE_GEOM_CLASS(g_part_class, g_part);
106 
/*
 * Control verbs dispatched by g_part_ctlreq().  G_PART_CTL_NONE marks
 * an unrecognized verb.
 */
enum g_part_ctl {
	G_PART_CTL_NONE,
	G_PART_CTL_ADD,
	G_PART_CTL_COMMIT,
	G_PART_CTL_CREATE,
	G_PART_CTL_DELETE,
	G_PART_CTL_DESTROY,
	G_PART_CTL_MODIFY,
	G_PART_CTL_MOVE,
	G_PART_CTL_RECOVER,
	G_PART_CTL_RESIZE,
	G_PART_CTL_UNDO
};
120 
121 /*
122  * Support functions.
123  */
124 
125 static void g_part_wither(struct g_geom *, int);
126 
127 const char *
128 g_part_alias_name(enum g_part_alias alias)
129 {
130 	int i;
131 
132 	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
133 		if (g_part_alias_list[i].alias != alias)
134 			continue;
135 		return (g_part_alias_list[i].lexeme);
136 	}
137 
138 	return (NULL);
139 }
140 
141 void
142 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
143     u_int *bestheads)
144 {
145 	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
146 	off_t chs, cylinders;
147 	u_int heads;
148 	int idx;
149 
150 	*bestchs = 0;
151 	*bestheads = 0;
152 	for (idx = 0; candidate_heads[idx] != 0; idx++) {
153 		heads = candidate_heads[idx];
154 		cylinders = blocks / heads / sectors;
155 		if (cylinders < heads || cylinders < sectors)
156 			break;
157 		if (cylinders > 1023)
158 			continue;
159 		chs = cylinders * heads * sectors;
160 		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
161 			*bestchs = chs;
162 			*bestheads = heads;
163 		}
164 	}
165 }
166 
/*
 * Attach a disk geometry (heads/sectors) to the table.  Firmware
 * values are used verbatim when they are sane; otherwise the best
 * synthetic C/H/S geometry is computed from the block count.
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	/* Prefer the geometry reported by the firmware, if sane. */
	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 ||
	    sectors < 1 || sectors > 63 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 ||
	    heads < 1 || heads > 255) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
215 
216 struct g_part_entry *
217 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
218     quad_t end)
219 {
220 	struct g_part_entry *entry, *last;
221 
222 	last = NULL;
223 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
224 		if (entry->gpe_index == index)
225 			break;
226 		if (entry->gpe_index > index) {
227 			entry = NULL;
228 			break;
229 		}
230 		last = entry;
231 	}
232 	if (entry == NULL) {
233 		entry = g_malloc(table->gpt_scheme->gps_entrysz,
234 		    M_WAITOK | M_ZERO);
235 		entry->gpe_index = index;
236 		if (last == NULL)
237 			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
238 		else
239 			LIST_INSERT_AFTER(last, entry, gpe_entry);
240 	}
241 	entry->gpe_start = start;
242 	entry->gpe_end = end;
243 	return (entry);
244 }
245 
/*
 * Create (or refresh) the GEOM provider for a partition entry.  The
 * provider inherits sector size and stripe parameters from the geom's
 * underlying provider and is named via the scheme's NAME method.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	char buf[32];
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Byte offset of the partition on the underlying provider. */
	entry->gpe_offset = entry->gpe_start * pp->sectorsize;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	if (pp->stripesize > 0) {
		/* Propagate stripe geometry, shifted by our offset. */
		entry->gpe_pp->stripesize = pp->stripesize;
		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
		    entry->gpe_offset) % pp->stripesize;
	}
	g_error_provider(entry->gpe_pp, 0);
}
276 
277 static int
278 g_part_parm_geom(const char *p, struct g_geom **v)
279 {
280 	struct g_geom *gp;
281 
282 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
283 		if (!strcmp(p, gp->name))
284 			break;
285 	}
286 	if (gp == NULL)
287 		return (EINVAL);
288 	*v = gp;
289 	return (0);
290 }
291 
292 static int
293 g_part_parm_provider(const char *p, struct g_provider **v)
294 {
295 	struct g_provider *pp;
296 
297 	pp = g_provider_by_name(p);
298 	if (pp == NULL)
299 		return (EINVAL);
300 	*v = pp;
301 	return (0);
302 }
303 
304 static int
305 g_part_parm_quad(const char *p, quad_t *v)
306 {
307 	char *x;
308 	quad_t q;
309 
310 	q = strtoq(p, &x, 0);
311 	if (*x != '\0' || q < 0)
312 		return (EINVAL);
313 	*v = q;
314 	return (0);
315 }
316 
317 static int
318 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
319 {
320 	struct g_part_scheme **iter, *s;
321 
322 	s = NULL;
323 	SET_FOREACH(iter, g_part_scheme_set) {
324 		if ((*iter)->name == NULL)
325 			continue;
326 		if (!strcasecmp((*iter)->name, p)) {
327 			s = *iter;
328 			break;
329 		}
330 	}
331 	if (s == NULL)
332 		return (EINVAL);
333 	*v = s;
334 	return (0);
335 }
336 
/*
 * Accept any non-empty string parameter verbatim; reject the empty
 * string with EINVAL.
 */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (*p == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}
346 
347 static int
348 g_part_parm_uint(const char *p, u_int *v)
349 {
350 	char *x;
351 	long l;
352 
353 	l = strtol(p, &x, 0);
354 	if (*x != '\0' || l < 0 || l > INT_MAX)
355 		return (EINVAL);
356 	*v = (unsigned int)l;
357 	return (0);
358 }
359 
/*
 * Find the partitioning scheme that best recognizes the media on the
 * given consumer.  A scheme's PROBE method returns a priority: 0 is a
 * perfect match (stop immediately), negative values express weaker
 * preference (larger, i.e. closer to 0, wins), positive values are
 * errors.  The winning table ends up in gp->softc.  Returns ENXIO
 * when no scheme matches at all.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme **iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Start by re-probing the currently attached scheme, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme;
	pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) :
	    INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = &g_part_null_scheme;
		pri = INT_MIN;
	}

	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter) == &g_part_null_scheme)
			continue;
		/* Instantiate a candidate table object for this scheme. */
		table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = *iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Best match so far: it replaces the old table. */
			pri = probe;
			scheme = *iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == &g_part_null_scheme) ? ENXIO : 0);
}
402 
403 /*
404  * Control request functions.
405  */
406 
/*
 * Add verb: create a partition entry covering the sector range
 * [gpp_start, gpp_start + gpp_size - 1] and instantiate a provider
 * for it.  The change stays in-core until committed.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Validate the requested range against the table's usable area. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the entry list to find a reusable deleted entry with the
	 * wanted index (delent), the insertion point (last), and to
	 * detect overlaps with live entries.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index) {
			index = entry->gpe_index + 1;
			last = entry;
		}
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	/* An explicitly requested index must still be available. */
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}

	/* Reuse a matching deleted entry when possible, else allocate. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Link the new entry so the list stays sorted by index. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s added\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
512 
/*
 * Commit verb: flush all pending in-core changes to disk.  Scrubs the
 * sectors recorded in the gpt_smhead/gpt_smtail bitmaps, writes the
 * metadata through the scheme's WRITE method, finalizes the entry
 * list and drops the exclusive access taken when the table was opened.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Committing only makes sense after the table has been opened. */
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/*
		 * Zero out the sectors flagged for scrubbing: gpt_smhead
		 * is a bitmap of sectors at the start of the media,
		 * gpt_smtail one of sectors at the end.
		 */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	/* A null scheme means the table was destroyed; wither the geom. */
	if (table->gpt_scheme == &g_part_null_scheme) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Finalize entries: purge deleted ones, clear change flags. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
589 
/*
 * Create verb: put a new, empty partition table of the requested
 * scheme on a provider.  If the provider already carries a "null"
 * scheme geom (left over from a destroy), that geom is recycled.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	/* The entry count, when given, must fit the scheme's limits. */
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* Fresh geom: attach to the provider and open it r/w/e. */
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Recycled geom: inherit state from the null table. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	/* Undo: wither a fresh geom, or restore the old null table. */
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
716 
/*
 * Delete verb: mark a partition entry as deleted (or free it outright
 * if it was never committed) and wither its provider.  Fails with
 * EBUSY while the provider is open.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* Refuse to delete a partition that is currently open. */
	pp = entry->gpe_pp;
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	pp->private = NULL;
	entry->gpe_pp = NULL;
	if (entry->gpe_created) {
		/* Never committed: no on-disk state, free immediately. */
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		/* Keep the entry around until the delete is committed. */
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s deleted\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
772 
/*
 * Destroy verb: remove the partition table as a whole.  All entries
 * must already be deleted.  The geom is left with the "null" scheme
 * attached so a subsequent create, commit or undo can operate on it.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Any live (non-deleted) entry blocks the destroy. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Replace the table with a null-scheme placeholder table. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
827 
/*
 * Modify verb: change the type and/or label of an existing entry via
 * the scheme's MODIFY method.  The change stays in-core until
 * committed.
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* A freshly created entry needs no separate modified flag. */
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s modified\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
875 
/* Move verb: not implemented. */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
882 
/* Recover verb: not implemented. */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'recover'", ENOSYS);
	return (ENOSYS);
}
889 
/* Resize verb: not implemented. */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'resize'", ENOSYS);
	return (ENOSYS);
}
896 
/*
 * Undo verb: discard all uncommitted in-core changes.  Created
 * entries are dropped, deleted ones reinstated by re-reading (or
 * re-probing) the on-disk metadata, and the exclusive access taken
 * at open time is released.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Entries added since open are dropped entirely. */
			pp = entry->gpe_pp;
			pp->private = NULL;
			entry->gpe_pp = NULL;
			g_wither_provider(pp, ENXIO);
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/*
	 * A destroyed or freshly created table has no valid on-disk
	 * counterpart to re-read; it has to be re-probed instead.
	 */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	/* Re-create providers for the entries read back from disk. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
971 
972 static void
973 g_part_wither(struct g_geom *gp, int error)
974 {
975 	struct g_part_entry *entry;
976 	struct g_part_table *table;
977 
978 	table = gp->softc;
979 	if (table != NULL) {
980 		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
981 			LIST_REMOVE(entry, gpe_entry);
982 			g_free(entry);
983 		}
984 		if (gp->softc != NULL) {
985 			kobj_delete((kobj_t)gp->softc, M_GEOM);
986 			gp->softc = NULL;
987 		}
988 	}
989 	g_wither_geom(gp, error);
990 }
991 
992 /*
993  * Class methods.
994  */
995 
996 static void
997 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
998 {
999 	struct g_part_parms gpp;
1000 	struct g_part_table *table;
1001 	struct gctl_req_arg *ap;
1002 	const char *p;
1003 	enum g_part_ctl ctlreq;
1004 	unsigned int i, mparms, oparms, parm;
1005 	int auto_commit, close_on_error;
1006 	int error, modifies;
1007 
1008 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1009 	g_topology_assert();
1010 
1011 	ctlreq = G_PART_CTL_NONE;
1012 	modifies = 1;
1013 	mparms = 0;
1014 	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1015 	switch (*verb) {
1016 	case 'a':
1017 		if (!strcmp(verb, "add")) {
1018 			ctlreq = G_PART_CTL_ADD;
1019 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1020 			    G_PART_PARM_START | G_PART_PARM_TYPE;
1021 			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1022 		}
1023 		break;
1024 	case 'c':
1025 		if (!strcmp(verb, "commit")) {
1026 			ctlreq = G_PART_CTL_COMMIT;
1027 			mparms |= G_PART_PARM_GEOM;
1028 			modifies = 0;
1029 		} else if (!strcmp(verb, "create")) {
1030 			ctlreq = G_PART_CTL_CREATE;
1031 			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1032 			oparms |= G_PART_PARM_ENTRIES;
1033 		}
1034 		break;
1035 	case 'd':
1036 		if (!strcmp(verb, "delete")) {
1037 			ctlreq = G_PART_CTL_DELETE;
1038 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1039 		} else if (!strcmp(verb, "destroy")) {
1040 			ctlreq = G_PART_CTL_DESTROY;
1041 			mparms |= G_PART_PARM_GEOM;
1042 		}
1043 		break;
1044 	case 'm':
1045 		if (!strcmp(verb, "modify")) {
1046 			ctlreq = G_PART_CTL_MODIFY;
1047 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1048 			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1049 		} else if (!strcmp(verb, "move")) {
1050 			ctlreq = G_PART_CTL_MOVE;
1051 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1052 		}
1053 		break;
1054 	case 'r':
1055 		if (!strcmp(verb, "recover")) {
1056 			ctlreq = G_PART_CTL_RECOVER;
1057 			mparms |= G_PART_PARM_GEOM;
1058 		} else if (!strcmp(verb, "resize")) {
1059 			ctlreq = G_PART_CTL_RESIZE;
1060 			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1061 		}
1062 		break;
1063 	case 'u':
1064 		if (!strcmp(verb, "undo")) {
1065 			ctlreq = G_PART_CTL_UNDO;
1066 			mparms |= G_PART_PARM_GEOM;
1067 			modifies = 0;
1068 		}
1069 		break;
1070 	}
1071 	if (ctlreq == G_PART_CTL_NONE) {
1072 		gctl_error(req, "%d verb '%s'", EINVAL, verb);
1073 		return;
1074 	}
1075 
1076 	bzero(&gpp, sizeof(gpp));
1077 	for (i = 0; i < req->narg; i++) {
1078 		ap = &req->arg[i];
1079 		parm = 0;
1080 		switch (ap->name[0]) {
1081 		case 'c':
1082 			if (!strcmp(ap->name, "class"))
1083 				continue;
1084 			break;
1085 		case 'e':
1086 			if (!strcmp(ap->name, "entries"))
1087 				parm = G_PART_PARM_ENTRIES;
1088 			break;
1089 		case 'f':
1090 			if (!strcmp(ap->name, "flags"))
1091 				parm = G_PART_PARM_FLAGS;
1092 			break;
1093 		case 'g':
1094 			if (!strcmp(ap->name, "geom"))
1095 				parm = G_PART_PARM_GEOM;
1096 			break;
1097 		case 'i':
1098 			if (!strcmp(ap->name, "index"))
1099 				parm = G_PART_PARM_INDEX;
1100 			break;
1101 		case 'l':
1102 			if (!strcmp(ap->name, "label"))
1103 				parm = G_PART_PARM_LABEL;
1104 			break;
1105 		case 'o':
1106 			if (!strcmp(ap->name, "output"))
1107 				parm = G_PART_PARM_OUTPUT;
1108 			break;
1109 		case 'p':
1110 			if (!strcmp(ap->name, "provider"))
1111 				parm = G_PART_PARM_PROVIDER;
1112 			break;
1113 		case 's':
1114 			if (!strcmp(ap->name, "scheme"))
1115 				parm = G_PART_PARM_SCHEME;
1116 			else if (!strcmp(ap->name, "size"))
1117 				parm = G_PART_PARM_SIZE;
1118 			else if (!strcmp(ap->name, "start"))
1119 				parm = G_PART_PARM_START;
1120 			break;
1121 		case 't':
1122 			if (!strcmp(ap->name, "type"))
1123 				parm = G_PART_PARM_TYPE;
1124 			break;
1125 		case 'v':
1126 			if (!strcmp(ap->name, "verb"))
1127 				continue;
1128 			else if (!strcmp(ap->name, "version"))
1129 				parm = G_PART_PARM_VERSION;
1130 			break;
1131 		}
1132 		if ((parm & (mparms | oparms)) == 0) {
1133 			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1134 			return;
1135 		}
1136 		p = gctl_get_asciiparam(req, ap->name);
1137 		if (p == NULL) {
1138 			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
1139 			return;
1140 		}
1141 		switch (parm) {
1142 		case G_PART_PARM_ENTRIES:
1143 			error = g_part_parm_uint(p, &gpp.gpp_entries);
1144 			break;
1145 		case G_PART_PARM_FLAGS:
1146 			if (p[0] == '\0')
1147 				continue;
1148 			error = g_part_parm_str(p, &gpp.gpp_flags);
1149 			break;
1150 		case G_PART_PARM_GEOM:
1151 			error = g_part_parm_geom(p, &gpp.gpp_geom);
1152 			break;
1153 		case G_PART_PARM_INDEX:
1154 			error = g_part_parm_uint(p, &gpp.gpp_index);
1155 			break;
1156 		case G_PART_PARM_LABEL:
1157 			/* An empty label is always valid. */
1158 			gpp.gpp_label = p;
1159 			error = 0;
1160 			break;
1161 		case G_PART_PARM_OUTPUT:
1162 			error = 0;	/* Write-only parameter */
1163 			break;
1164 		case G_PART_PARM_PROVIDER:
1165 			error = g_part_parm_provider(p, &gpp.gpp_provider);
1166 			break;
1167 		case G_PART_PARM_SCHEME:
1168 			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
1169 			break;
1170 		case G_PART_PARM_SIZE:
1171 			error = g_part_parm_quad(p, &gpp.gpp_size);
1172 			break;
1173 		case G_PART_PARM_START:
1174 			error = g_part_parm_quad(p, &gpp.gpp_start);
1175 			break;
1176 		case G_PART_PARM_TYPE:
1177 			error = g_part_parm_str(p, &gpp.gpp_type);
1178 			break;
1179 		case G_PART_PARM_VERSION:
1180 			error = g_part_parm_uint(p, &gpp.gpp_version);
1181 			break;
1182 		default:
1183 			error = EDOOFUS;
1184 			break;
1185 		}
1186 		if (error) {
1187 			gctl_error(req, "%d %s '%s'", error, ap->name, p);
1188 			return;
1189 		}
1190 		gpp.gpp_parms |= parm;
1191 	}
1192 	if ((gpp.gpp_parms & mparms) != mparms) {
1193 		parm = mparms - (gpp.gpp_parms & mparms);
1194 		gctl_error(req, "%d param '%x'", ENOATTR, parm);
1195 		return;
1196 	}
1197 
1198 	/* Obtain permissions if possible/necessary. */
1199 	close_on_error = 0;
1200 	table = NULL;	/* Suppress uninit. warning. */
1201 	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1202 		table = gpp.gpp_geom->softc;
1203 		if (table != NULL && !table->gpt_opened) {
1204 			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1205 			    1, 1, 1);
1206 			if (error) {
1207 				gctl_error(req, "%d geom '%s'", error,
1208 				    gpp.gpp_geom->name);
1209 				return;
1210 			}
1211 			table->gpt_opened = 1;
1212 			close_on_error = 1;
1213 		}
1214 	}
1215 
1216 	error = EDOOFUS;	/* Prevent bogus  uninit. warning. */
1217 	switch (ctlreq) {
1218 	case G_PART_CTL_NONE:
1219 		panic("%s", __func__);
1220 	case G_PART_CTL_ADD:
1221 		error = g_part_ctl_add(req, &gpp);
1222 		break;
1223 	case G_PART_CTL_COMMIT:
1224 		error = g_part_ctl_commit(req, &gpp);
1225 		break;
1226 	case G_PART_CTL_CREATE:
1227 		error = g_part_ctl_create(req, &gpp);
1228 		break;
1229 	case G_PART_CTL_DELETE:
1230 		error = g_part_ctl_delete(req, &gpp);
1231 		break;
1232 	case G_PART_CTL_DESTROY:
1233 		error = g_part_ctl_destroy(req, &gpp);
1234 		break;
1235 	case G_PART_CTL_MODIFY:
1236 		error = g_part_ctl_modify(req, &gpp);
1237 		break;
1238 	case G_PART_CTL_MOVE:
1239 		error = g_part_ctl_move(req, &gpp);
1240 		break;
1241 	case G_PART_CTL_RECOVER:
1242 		error = g_part_ctl_recover(req, &gpp);
1243 		break;
1244 	case G_PART_CTL_RESIZE:
1245 		error = g_part_ctl_resize(req, &gpp);
1246 		break;
1247 	case G_PART_CTL_UNDO:
1248 		error = g_part_ctl_undo(req, &gpp);
1249 		break;
1250 	}
1251 
1252 	/* Implement automatic commit. */
1253 	if (!error) {
1254 		auto_commit = (modifies &&
1255 		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1256 		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1257 		if (auto_commit) {
1258 			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
1259 			error = g_part_ctl_commit(req, &gpp);
1260 		}
1261 	}
1262 
1263 	if (error && close_on_error) {
1264 		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1265 		table->gpt_opened = 0;
1266 	}
1267 }
1268 
/*
 * Class method: tear down a partitioning GEOM on request.  Called by
 * the GEOM framework (via gctl) to dispose of gp; the actual teardown
 * is delegated to g_part_wither().  Always reports success.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	/* Wither with EINVAL so in-flight I/O on our providers errors out. */
	g_part_wither(gp, EINVAL);
	return (0);
}
1280 
/*
 * Taste method: probe provider pp for a supported partitioning scheme
 * and, on success, return a new partitioning GEOM with one provider
 * per partition entry.  Returns NULL when no scheme is recognized or
 * on any error (the transient GEOM is withered in that case).
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Probing may sleep/do I/O; drop the topology lock for it. */
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		/* Parent scheme forbids nesting under it. */
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	/* No PART::depth attribute means we're at the top level. */
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	/* g_part_probe() attached a scheme-specific table as softc. */
	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	/* Let the scheme read and parse the on-disk metadata. */
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	/* Re-acquire the lock before touching the topology. */
	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	/* Drop the temporary read access acquired above. */
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1360 
1361 /*
1362  * Geom methods.
1363  */
1364 
1365 static int
1366 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1367 {
1368 	struct g_consumer *cp;
1369 
1370 	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1371 	    dw, de));
1372 
1373 	cp = LIST_FIRST(&pp->geom->consumer);
1374 
1375 	/* We always gain write-exclusive access. */
1376 	return (g_access(cp, dr, dw, dw + de));
1377 }
1378 
/*
 * Dumpconf method: emit configuration for sysctl kern.geom.confdump/
 * confxml.  Behavior depends on which arguments are set:
 *   indent == NULL           -> one-line (conftxt) provider summary
 *   cp != NULL               -> consumer XML (nothing to report)
 *   pp != NULL               -> per-partition provider XML
 *   otherwise                -> geom-level (table-wide) XML
 * Scheme-specific details are appended via G_PART_DUMPCONF().
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, (__func__));
		/* pp->private is the partition entry; absent means withering. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
1430 
/*
 * Orphan method: our consumer's provider has gone away.  Wither the
 * partitioning GEOM, propagating the provider's error to our own
 * providers.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* An orphaned provider must already carry a non-zero error. */
	KASSERT(pp->error != 0, (__func__));
	g_part_wither(cp->geom, pp->error);
}
1444 
/*
 * Spoiled method: the underlying provider was written to (e.g. the
 * partition table may have changed behind our back).  Wither this
 * GEOM with ENXIO; re-tasting will rebuild it from the new metadata.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
1454 
/*
 * Start method: dispatch I/O arriving on a partition provider.
 * Read/write/delete requests are clipped to the partition and cloned
 * down to the disk with the partition offset applied; attribute
 * requests are answered locally where possible, otherwise forwarded.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* No entry: the partition is being deleted/withered. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the request to the partition boundary. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Translate partition-relative to disk-absolute offset. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Pass flushes straight through to the disk below. */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			/* Clip and translate the dump window like I/O above. */
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* FLUSH and unhandled GETATTR requests are forwarded unmodified. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
1543