xref: /freebsd/sys/geom/part/g_part.c (revision f0a75d274af375d15b97b830966b99a02b7db911)
1 /*-
2  * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/kobj.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
41 #include <sys/sbuf.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/part/g_part.h>
47 
48 #include "g_part_if.h"
49 
50 static kobj_method_t g_part_null_methods[] = {
51 	{ 0, 0 }
52 };
53 
54 static struct g_part_scheme g_part_null_scheme = {
55 	NULL,
56 	g_part_null_methods,
57 	sizeof(struct g_part_table),
58 };
59 G_PART_SCHEME_DECLARE(g_part_null_scheme);
60 
61 SET_DECLARE(g_part_scheme_set, struct g_part_scheme);
62 
63 struct g_part_alias_list {
64 	const char *lexeme;
65 	enum g_part_alias alias;
66 } g_part_alias_list[G_PART_ALIAS_COUNT] = {
67 	{ "@efi", G_PART_ALIAS_EFI },
68 	{ "@freebsd", G_PART_ALIAS_FREEBSD },
69 	{ "@freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
70 	{ "@freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
71 	{ "@freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
72 	{ "@mbr", G_PART_ALIAS_MBR }
73 };
74 
75 /*
76  * The GEOM partitioning class.
77  */
78 static g_ctl_req_t g_part_ctlreq;
79 static g_ctl_destroy_geom_t g_part_destroy_geom;
80 static g_taste_t g_part_taste;
81 
82 static g_access_t g_part_access;
83 static g_dumpconf_t g_part_dumpconf;
84 static g_orphan_t g_part_orphan;
85 static g_spoiled_t g_part_spoiled;
86 static g_start_t g_part_start;
87 
88 static struct g_class g_part_class = {
89 	.name = "PART",
90 	.version = G_VERSION,
91 	/* Class methods. */
92 	.ctlreq = g_part_ctlreq,
93 	.destroy_geom = g_part_destroy_geom,
94 	.taste = g_part_taste,
95 	/* Geom methods. */
96 	.access = g_part_access,
97 	.dumpconf = g_part_dumpconf,
98 	.orphan = g_part_orphan,
99 	.spoiled = g_part_spoiled,
100 	.start = g_part_start,
101 };
102 
103 DECLARE_GEOM_CLASS(g_part_class, g_part);
104 
105 enum g_part_ctl {
106 	G_PART_CTL_NONE,
107 	G_PART_CTL_ADD,
108 	G_PART_CTL_COMMIT,
109 	G_PART_CTL_CREATE,
110 	G_PART_CTL_DELETE,
111 	G_PART_CTL_DESTROY,
112 	G_PART_CTL_MODIFY,
113 	G_PART_CTL_MOVE,
114 	G_PART_CTL_QUERY,
115 	G_PART_CTL_RECOVER,
116 	G_PART_CTL_RESIZE,
117 	G_PART_CTL_UNDO
118 };
119 
120 /*
121  * Support functions.
122  */
123 
124 static void g_part_wither(struct g_geom *, int);
125 
126 const char *
127 g_part_alias_name(enum g_part_alias alias)
128 {
129 	int i;
130 
131 	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
132 		if (g_part_alias_list[i].alias != alias)
133 			continue;
134 		return (g_part_alias_list[i].lexeme);
135 	}
136 
137 	return (NULL);
138 }
139 
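/*
 * Return the entry with the given index, creating it if it does not
 * exist yet.  The entry list is kept sorted by index; a newly
 * allocated entry (of the scheme-specific size) is linked in at the
 * proper position.  The start and end sectors are (re)set in either
 * case.
 */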
140 struct g_part_entry *
141 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
142     quad_t end)
143 {
144 	struct g_part_entry *entry, *last;
145 
146 	last = NULL;
147 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
148 		if (entry->gpe_index == index)
149 			break;
150 		if (entry->gpe_index > index) {
151 			entry = NULL;
152 			break;
153 		}
154 		last = entry;
155 	}
156 	if (entry == NULL) {
157 		entry = g_malloc(table->gpt_scheme->gps_entrysz,
158 		    M_WAITOK | M_ZERO);
159 		entry->gpe_index = index;
160 		if (last == NULL)
161 			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
162 		else
163 			LIST_INSERT_AFTER(last, entry, gpe_entry);
164 	}
165 	entry->gpe_start = start;
166 	entry->gpe_end = end;
167 	return (entry);
168 }
169 
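/*
 * Create the provider for a partition entry if it does not exist yet
 * and (re)initialize its geometry.  The provider name is the geom
 * name with a scheme-specific suffix appended; offset, media size,
 * sector size and stripe parameters are derived from the entry and
 * the underlying provider, after which the provider is activated.
 */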
170 static void
171 g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
172     struct g_part_entry *entry)
173 {
174 	char buf[32];
175 	struct g_consumer *cp;
176 	struct g_provider *pp;
177 
178 	cp = LIST_FIRST(&gp->consumer);
179 	pp = cp->provider;
180 
181 	entry->gpe_offset = entry->gpe_start * pp->sectorsize;
182 
183 	if (entry->gpe_pp == NULL) {
184 		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
185 		    G_PART_NAME(table, entry, buf, sizeof(buf)));
186 		entry->gpe_pp->private = entry;		/* Close the circle. */
187 	}
188 	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
189 	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
190 	    pp->sectorsize;
191 	entry->gpe_pp->sectorsize = pp->sectorsize;
192 	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
193 	if (pp->stripesize > 0) {
194 		entry->gpe_pp->stripesize = pp->stripesize;
195 		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
196 		    entry->gpe_offset) % pp->stripesize;
197 	}
198 	g_error_provider(entry->gpe_pp, 0);
199 }
200 
201 static int
202 g_part_parm_geom(const char *p, struct g_geom **v)
203 {
204 	struct g_geom *gp;
205 
206 	LIST_FOREACH(gp, &g_part_class.geom, geom) {
207 		if (!strcmp(p, gp->name))
208 			break;
209 	}
210 	if (gp == NULL)
211 		return (EINVAL);
212 	*v = gp;
213 	return (0);
214 }
215 
216 static int
217 g_part_parm_provider(const char *p, struct g_provider **v)
218 {
219 	struct g_provider *pp;
220 
221 	pp = g_provider_by_name(p);
222 	if (pp == NULL)
223 		return (EINVAL);
224 	*v = pp;
225 	return (0);
226 }
227 
228 static int
229 g_part_parm_quad(const char *p, quad_t *v)
230 {
231 	char *x;
232 	quad_t q;
233 
234 	q = strtoq(p, &x, 0);
235 	if (*x != '\0' || q < 0)
236 		return (EINVAL);
237 	*v = q;
238 	return (0);
239 }
240 
241 static int
242 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
243 {
244 	struct g_part_scheme **iter, *s;
245 
246 	s = NULL;
247 	SET_FOREACH(iter, g_part_scheme_set) {
248 		if ((*iter)->name == NULL)
249 			continue;
250 		if (!strcmp((*iter)->name, p)) {
251 			s = *iter;
252 			break;
253 		}
254 	}
255 	if (s == NULL)
256 		return (EINVAL);
257 	*v = s;
258 	return (0);
259 }
260 
261 static int
262 g_part_parm_str(const char *p, const char **v)
263 {
264 
265 	if (p[0] == '\0')
266 		return (EINVAL);
267 	*v = p;
268 	return (0);
269 }
270 
271 static int
272 g_part_parm_uint(const char *p, u_int *v)
273 {
274 	char *x;
275 	long l;
276 
277 	l = strtol(p, &x, 0);
278 	if (*x != '\0' || l < 0 || l > INT_MAX)
279 		return (EINVAL);
280 	*v = (unsigned int)l;
281 	return (0);
282 }
283 
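/*
 * Probe the consumer with every registered scheme and attach the
 * table object of the scheme that reports the best (highest,
 * non-positive) probe priority to gp->softc.  A priority of 0 is a
 * perfect match and ends the search early.  Returns ENXIO when no
 * scheme recognizes the media.
 */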
284 static int
285 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
286 {
287 	struct g_part_scheme **iter, *scheme;
288 	struct g_part_table *table;
289 	int pri, probe;
290 
291 	table = gp->softc;
292 	scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme;
293 	pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) :
294 	    INT_MIN;
295 	if (pri == 0)
296 		goto done;
297 	if (pri > 0) {	/* error */
298 		scheme = &g_part_null_scheme;
299 		pri = INT_MIN;
300 	}
301 
302 	SET_FOREACH(iter, g_part_scheme_set) {
303 		if ((*iter) == &g_part_null_scheme)
304 			continue;
305 		table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM,
306 		    M_WAITOK);
307 		table->gpt_gp = gp;
308 		table->gpt_scheme = *iter;
309 		table->gpt_depth = depth;
310 		probe = G_PART_PROBE(table, cp);
311 		if (probe <= 0 && probe > pri) {
312 			pri = probe;
313 			scheme = *iter;
314 			if (gp->softc != NULL)
315 				kobj_delete((kobj_t)gp->softc, M_GEOM);
316 			gp->softc = table;
317 			if (pri == 0)
318 				goto done;
319 		} else
320 			kobj_delete((kobj_t)table, M_GEOM);
321 	}
322 
323 done:
324 	return ((scheme == &g_part_null_scheme) ? ENXIO : 0);
325 }
326 
327 /*
328  * Control request functions.
329  */
330 
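/*
 * Handle the "add" verb: check that the requested range lies within
 * the table and does not overlap an existing entry, select or verify
 * the entry index (reported back through the "index" parameter), let
 * the scheme add the entry and create its provider.  A previously
 * deleted entry with the same index is resurrected instead of
 * allocating a new one.
 */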
331 static int
332 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
333 {
334 	char buf[16];
335 	struct g_geom *gp;
336 	struct g_provider *pp;
337 	struct g_part_entry *delent, *last, *entry;
338 	struct g_part_table *table;
339 	quad_t end;
340 	unsigned int index;
341 	int error;
342 
343 	gp = gpp->gpp_geom;
344 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
345 	g_topology_assert();
346 
347 	pp = LIST_FIRST(&gp->consumer)->provider;
348 	table = gp->softc;
349 	end = gpp->gpp_start + gpp->gpp_size - 1;
350 
351 	if (gpp->gpp_start < table->gpt_first ||
352 	    gpp->gpp_start > table->gpt_last) {
353 		gctl_error(req, "%d start '%jd'", EINVAL,
354 		    (intmax_t)gpp->gpp_start);
355 		return (EINVAL);
356 	}
357 	if (end < gpp->gpp_start || end > table->gpt_last) {
358 		gctl_error(req, "%d size '%jd'", EINVAL,
359 		    (intmax_t)gpp->gpp_size);
360 		return (EINVAL);
361 	}
362 	if (gpp->gpp_index > table->gpt_entries) {
363 		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
364 		return (EINVAL);
365 	}
366 
367 	delent = last = NULL;
368 	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
369 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
370 		if (entry->gpe_deleted) {
371 			if (entry->gpe_index == index)
372 				delent = entry;
373 			continue;
374 		}
375 		if (entry->gpe_index == index) {
376 			index = entry->gpe_index + 1;
377 			last = entry;
378 		}
379 		if (gpp->gpp_start >= entry->gpe_start &&
380 		    gpp->gpp_start <= entry->gpe_end) {
381 			gctl_error(req, "%d start '%jd'", ENOSPC,
382 			    (intmax_t)gpp->gpp_start);
383 			return (ENOSPC);
384 		}
385 		if (end >= entry->gpe_start && end <= entry->gpe_end) {
386 			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
387 			return (ENOSPC);
388 		}
389 		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
390 			gctl_error(req, "%d size '%jd'", ENOSPC,
391 			    (intmax_t)gpp->gpp_size);
392 			return (ENOSPC);
393 		}
394 	}
395 	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
396 		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
397 		return (EEXIST);
398 	}
399 	snprintf(buf, sizeof(buf), "%d", index);
400 	gctl_set_param(req, "index", buf, strlen(buf) + 1);
401 
402 	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
403 	    M_WAITOK | M_ZERO) : delent;
404 	entry->gpe_index = index;
405 	entry->gpe_start = gpp->gpp_start;
406 	entry->gpe_end = end;
407 	error = G_PART_ADD(table, entry, gpp);
408 	if (error) {
409 		gctl_error(req, "%d", error);
410 		if (delent == NULL)
411 			g_free(entry);
412 		return (error);
413 	}
414 	if (delent == NULL) {
415 		if (last == NULL)
416 			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
417 		else
418 			LIST_INSERT_AFTER(last, entry, gpe_entry);
419 		entry->gpe_created = 1;
420 	} else {
421 		entry->gpe_deleted = 0;
422 		entry->gpe_modified = 1;
423 	}
424 	g_part_new_provider(gp, table, entry);
425 	return (0);
426 }
427 
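/*
 * Handle the "commit" verb: zero the sectors recorded in the head
 * and tail scrub maps, write the on-disk metadata through the
 * scheme's write method (or wither the geom if the table has been
 * destroyed), clear the created/modified markers, free deleted
 * entries and release the access obtained when the table was opened.
 */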
428 static int
429 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
430 {
431 	struct g_consumer *cp;
432 	struct g_geom *gp;
433 	struct g_provider *pp;
434 	struct g_part_entry *entry, *tmp;
435 	struct g_part_table *table;
436 	char *buf;
437 	int error, i;
438 
439 	gp = gpp->gpp_geom;
440 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
441 	g_topology_assert();
442 
443 	table = gp->softc;
444 	if (!table->gpt_opened) {
445 		gctl_error(req, "%d", EPERM);
446 		return (EPERM);
447 	}
448 
449 	cp = LIST_FIRST(&gp->consumer);
450 	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
451 		pp = cp->provider;
452 		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
453 		while (table->gpt_smhead != 0) {
454 			i = ffs(table->gpt_smhead) - 1;
455 			error = g_write_data(cp, i * pp->sectorsize, buf,
456 			    pp->sectorsize);
457 			if (error) {
458 				g_free(buf);
459 				goto fail;
460 			}
461 			table->gpt_smhead &= ~(1 << i);
462 		}
463 		while (table->gpt_smtail != 0) {
464 			i = ffs(table->gpt_smtail) - 1;
465 			error = g_write_data(cp, pp->mediasize - (i + 1) *
466 			    pp->sectorsize, buf, pp->sectorsize);
467 			if (error) {
468 				g_free(buf);
469 				goto fail;
470 			}
471 			table->gpt_smtail &= ~(1 << i);
472 		}
473 		g_free(buf);
474 	}
475 
476 	if (table->gpt_scheme == &g_part_null_scheme) {
477 		g_access(cp, -1, -1, -1);
478 		g_part_wither(gp, ENXIO);
479 		return (0);
480 	}
481 
482 	error = G_PART_WRITE(table, cp);
483 	if (error)
484 		goto fail;
485 
486 	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
487 		if (!entry->gpe_deleted) {
488 			entry->gpe_created = 0;
489 			entry->gpe_modified = 0;
490 			continue;
491 		}
492 		LIST_REMOVE(entry, gpe_entry);
493 		g_free(entry);
494 	}
495 	table->gpt_created = 0;
496 	table->gpt_opened = 0;
497 	g_access(cp, -1, -1, -1);
498 	return (0);
499 
500 fail:
501 	gctl_error(req, "%d", error);
502 	return (error);
503 }
504 
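/*
 * Handle the "create" verb: create a partition table on a provider.
 * If the provider already carries a geom with the null scheme (a
 * destroyed table that has not been committed yet) it is re-used;
 * otherwise a new geom and consumer are set up with read, write and
 * exclusive access.  The requested number of entries is validated
 * against the scheme limits, the scheme-specific table is
 * instantiated, the nesting depth determined and the scheme's
 * create method invoked.
 */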
505 static int
506 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
507 {
508 	struct g_consumer *cp;
509 	struct g_geom *gp;
510 	struct g_provider *pp;
511 	struct g_part_scheme *scheme;
512 	struct g_part_table *null, *table;
513 	int attr, error;
514 
515 	pp = gpp->gpp_provider;
516 	scheme = gpp->gpp_scheme;
517 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
518 	g_topology_assert();
519 
520 	/* Check that there isn't already a g_part geom on the provider. */
521 	error = g_part_parm_geom(pp->name, &gp);
522 	if (!error) {
523 		null = gp->softc;
524 		if (null->gpt_scheme != &g_part_null_scheme) {
525 			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
526 			return (EEXIST);
527 		}
528 	} else
529 		null = NULL;
530 
531 	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
532 	    (gpp->gpp_entries < scheme->gps_minent ||
533 	     gpp->gpp_entries > scheme->gps_maxent)) {
534 		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
535 		return (EINVAL);
536 	}
537 
538 	if (null == NULL)
539 		gp = g_new_geomf(&g_part_class, "%s", pp->name);
540 	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
541 	    M_WAITOK);
542 	table = gp->softc;
543 	table->gpt_gp = gp;
544 	table->gpt_scheme = gpp->gpp_scheme;
545 	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
546 	    gpp->gpp_entries : scheme->gps_minent;
547 	LIST_INIT(&table->gpt_entry);
548 	if (null == NULL) {
549 		cp = g_new_consumer(gp);
550 		error = g_attach(cp, pp);
551 		if (error == 0)
552 			error = g_access(cp, 1, 1, 1);
553 		if (error != 0) {
554 			g_part_wither(gp, error);
555 			gctl_error(req, "%d geom '%s'", error, pp->name);
556 			return (error);
557 		}
558 		table->gpt_opened = 1;
559 	} else {
560 		cp = LIST_FIRST(&gp->consumer);
561 		table->gpt_opened = null->gpt_opened;
562 		table->gpt_smhead = null->gpt_smhead;
563 		table->gpt_smtail = null->gpt_smtail;
564 	}
565 
566 	g_topology_unlock();
567 
568 	/* Make sure we can nest and if so, determine our depth. */
569 	error = g_getattr("PART::isleaf", cp, &attr);
570 	if (!error && attr) {
571 		error = ENODEV;
572 		goto fail;
573 	}
574 	error = g_getattr("PART::depth", cp, &attr);
575 	table->gpt_depth = (!error) ? attr + 1 : 0;
576 
577 	error = G_PART_CREATE(table, gpp);
578 	if (error)
579 		goto fail;
580 
581 	g_topology_lock();
582 
583 	table->gpt_created = 1;
584 	if (null != NULL)
585 		kobj_delete((kobj_t)null, M_GEOM);
586 	return (0);
587 
588 fail:
589 	g_topology_lock();
590 	if (null == NULL) {
591 		g_access(cp, -1, -1, -1);
592 		g_part_wither(gp, error);
593 	} else {
594 		kobj_delete((kobj_t)gp->softc, M_GEOM);
595 		gp->softc = null;
596 	}
597 	gctl_error(req, "%d provider", error);
598 	return (error);
599 }
600 
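/*
 * Handle the "delete" verb: look up the entry with the given index,
 * refuse if its provider is open, then destroy the provider and
 * either free the entry (when it was created in this transaction)
 * or mark it as deleted for the next commit.
 */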
601 static int
602 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
603 {
604 	struct g_geom *gp;
605 	struct g_provider *pp;
606 	struct g_part_entry *entry;
607 	struct g_part_table *table;
608 
609 	gp = gpp->gpp_geom;
610 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
611 	g_topology_assert();
612 
613 	table = gp->softc;
614 
615 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
616 		if (entry->gpe_deleted)
617 			continue;
618 		if (entry->gpe_index == gpp->gpp_index)
619 			break;
620 	}
621 	if (entry == NULL) {
622 		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
623 		return (ENOENT);
624 	}
625 
626 	pp = entry->gpe_pp;
627 	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
628 		gctl_error(req, "%d", EBUSY);
629 		return (EBUSY);
630 	}
631 
632 	pp->private = NULL;
633 	entry->gpe_pp = NULL;
634 	if (entry->gpe_created) {
635 		LIST_REMOVE(entry, gpe_entry);
636 		g_free(entry);
637 	} else {
638 		entry->gpe_modified = 0;
639 		entry->gpe_deleted = 1;
640 	}
641 	g_wither_provider(pp, ENXIO);
642 	return (0);
643 }
644 
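/*
 * Handle the "destroy" verb: refuse while any undeleted entry
 * remains, let the scheme tear down its metadata, then replace the
 * table with a null-scheme table that inherits the depth, open state
 * and scrub maps so that a subsequent commit or undo can finish the
 * job.
 */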
645 static int
646 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
647 {
648 	struct g_geom *gp;
649 	struct g_part_entry *entry;
650 	struct g_part_table *null, *table;
651 	int error;
652 
653 	gp = gpp->gpp_geom;
654 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
655 	g_topology_assert();
656 
657 	table = gp->softc;
658 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
659 		if (entry->gpe_deleted)
660 			continue;
661 		gctl_error(req, "%d", EBUSY);
662 		return (EBUSY);
663 	}
664 
665 	error = G_PART_DESTROY(table, gpp);
666 	if (error) {
667 		gctl_error(req, "%d", error);
668 		return (error);
669 	}
670 
671 	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
672 	    M_WAITOK);
673 	null = gp->softc;
674 	null->gpt_gp = gp;
675 	null->gpt_scheme = &g_part_null_scheme;
676 	LIST_INIT(&null->gpt_entry);
677 	null->gpt_depth = table->gpt_depth;
678 	null->gpt_opened = table->gpt_opened;
679 	null->gpt_smhead = table->gpt_smhead;
680 	null->gpt_smtail = table->gpt_smtail;
681 
682 	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
683 		LIST_REMOVE(entry, gpe_entry);
684 		g_free(entry);
685 	}
686 	kobj_delete((kobj_t)table, M_GEOM);
687 
688 	return (0);
689 }
690 
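/*
 * Handle the "modify" verb: look up the entry with the given index
 * and let the scheme change its type and/or label.  The entry is
 * marked modified unless it was created in this transaction.
 */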
691 static int
692 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
693 {
694 	struct g_geom *gp;
695 	struct g_part_entry *entry;
696 	struct g_part_table *table;
697 	int error;
698 
699 	gp = gpp->gpp_geom;
700 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
701 	g_topology_assert();
702 
703 	table = gp->softc;
704 
705 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
706 		if (entry->gpe_deleted)
707 			continue;
708 		if (entry->gpe_index == gpp->gpp_index)
709 			break;
710 	}
711 	if (entry == NULL) {
712 		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
713 		return (ENOENT);
714 	}
715 
716 	error = G_PART_MODIFY(table, entry, gpp);
717 	if (error) {
718 		gctl_error(req, "%d", error);
719 		return (error);
720 	}
721 
722 	if (!entry->gpe_created)
723 		entry->gpe_modified = 1;
724 	return (0);
725 }
726 
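/*
 * The "move", "query", "recover" and "resize" verbs are recognized
 * but not implemented yet; they all fail with ENOSYS.
 */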
727 static int
728 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
729 {
730 	gctl_error(req, "%d verb 'move'", ENOSYS);
731 	return (ENOSYS);
732 }
733 
734 static int
735 g_part_ctl_query(struct gctl_req *req, struct g_part_parms *gpp)
736 {
737 	gctl_error(req, "%d verb 'query'", ENOSYS);
738 	return (ENOSYS);
739 }
740 
741 static int
742 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
743 {
744 	gctl_error(req, "%d verb 'recover'", ENOSYS);
745 	return (ENOSYS);
746 }
747 
748 static int
749 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
750 {
751 	gctl_error(req, "%d verb 'resize'", ENOSYS);
752 	return (ENOSYS);
753 }
754 
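/*
 * Handle the "undo" verb: discard all uncommitted changes.  Created
 * entries and their providers are removed, deleted and modified
 * entries are restored by re-reading the on-disk metadata, and the
 * media is re-probed if the table itself was created or destroyed
 * in this transaction.  The access taken when the table was opened
 * is released.
 */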
755 static int
756 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
757 {
758 	struct g_consumer *cp;
759 	struct g_provider *pp;
760 	struct g_geom *gp;
761 	struct g_part_entry *entry, *tmp;
762 	struct g_part_table *table;
763 	int error, reprobe;
764 
765 	gp = gpp->gpp_geom;
766 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
767 	g_topology_assert();
768 
769 	table = gp->softc;
770 	if (!table->gpt_opened) {
771 		gctl_error(req, "%d", EPERM);
772 		return (EPERM);
773 	}
774 
775 	cp = LIST_FIRST(&gp->consumer);
776 	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
777 		entry->gpe_modified = 0;
778 		if (entry->gpe_created) {
779 			pp = entry->gpe_pp;
780 			pp->private = NULL;
781 			entry->gpe_pp = NULL;
782 			g_wither_provider(pp, ENXIO);
783 			entry->gpe_deleted = 1;
784 		}
785 		if (entry->gpe_deleted) {
786 			LIST_REMOVE(entry, gpe_entry);
787 			g_free(entry);
788 		}
789 	}
790 
791 	g_topology_unlock();
792 
793 	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
794 	    table->gpt_created) ? 1 : 0;
795 
796 	if (reprobe) {
797 		if (!LIST_EMPTY(&table->gpt_entry)) {
798 			error = EBUSY;
799 			goto fail;
800 		}
801 		error = g_part_probe(gp, cp, table->gpt_depth);
802 		if (error)
803 			goto fail;
804 		table = gp->softc;
805 	}
806 
807 	error = G_PART_READ(table, cp);
808 	if (error)
809 		goto fail;
810 
811 	g_topology_lock();
812 
813 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
814 		g_part_new_provider(gp, table, entry);
815 
816 	table->gpt_opened = 0;
817 	g_access(cp, -1, -1, -1);
818 	return (0);
819 
820 fail:
821 	g_topology_lock();
822 	gctl_error(req, "%d", error);
823 	return (error);
824 }
825 
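/*
 * Dispose of a partitioning geom: free all entries and the table
 * object attached to the geom's softc, then wither the geom itself.
 */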
826 static void
827 g_part_wither(struct g_geom *gp, int error)
828 {
829 	struct g_part_entry *entry;
830 	struct g_part_table *table;
831 
832 	table = gp->softc;
833 	if (table != NULL) {
834 		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
835 			LIST_REMOVE(entry, gpe_entry);
836 			g_free(entry);
837 		}
838 		if (gp->softc != NULL) {
839 			kobj_delete((kobj_t)gp->softc, M_GEOM);
840 			gp->softc = NULL;
841 		}
842 	}
843 	g_wither_geom(gp, error);
844 }
845 
846 /*
847  * Class methods.
848  */
849 
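/*
 * The class control request handler.  The verb is mapped onto one of
 * the G_PART_CTL_* requests, the request arguments are parsed and
 * validated into a g_part_parms structure, requests with missing
 * mandatory parameters are rejected, the table is opened (read,
 * write and exclusive access on the consumer) for verbs that modify
 * it, and the request is dispatched to the matching g_part_ctl_*()
 * handler.
 */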
850 static void
851 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
852 {
853 	struct g_part_parms gpp;
854 	struct g_part_table *table;
855 	struct gctl_req_arg *ap;
856 	const char *p;
857 	enum g_part_ctl ctlreq;
858 	unsigned int i, mparms, oparms, parm;
859 	int error, modifies;
860 
861 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
862 	g_topology_assert();
863 
864 	ctlreq = G_PART_CTL_NONE;
865 	modifies = 0;
866 	mparms = oparms = 0;
867 	switch (*verb) {
868 	case 'a':
869 		if (!strcmp(verb, "add")) {
870 			ctlreq = G_PART_CTL_ADD;
871 			modifies = 1;
872 			mparms = G_PART_PARM_GEOM | G_PART_PARM_SIZE |
873 			    G_PART_PARM_START | G_PART_PARM_TYPE;
874 			oparms = G_PART_PARM_FLAGS | G_PART_PARM_INDEX |
875 			    G_PART_PARM_LABEL;
876 		}
877 		break;
878 	case 'c':
879 		if (!strcmp(verb, "commit")) {
880 			ctlreq = G_PART_CTL_COMMIT;
881 			mparms = G_PART_PARM_GEOM;
882 			oparms = G_PART_PARM_FLAGS;
883 		} else if (!strcmp(verb, "create")) {
884 			ctlreq = G_PART_CTL_CREATE;
885 			modifies = 1;
886 			mparms = G_PART_PARM_PROVIDER |
887 			    G_PART_PARM_SCHEME;
888 			oparms = G_PART_PARM_ENTRIES | G_PART_PARM_FLAGS;
889 		}
890 		break;
891 	case 'd':
892 		if (!strcmp(verb, "delete")) {
893 			ctlreq = G_PART_CTL_DELETE;
894 			modifies = 1;
895 			mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX;
896 			oparms = G_PART_PARM_FLAGS;
897 		} else if (!strcmp(verb, "destroy")) {
898 			ctlreq = G_PART_CTL_DESTROY;
899 			modifies = 1;
900 			mparms = G_PART_PARM_GEOM;
901 			oparms = G_PART_PARM_FLAGS;
902 		}
903 		break;
904 	case 'm':
905 		if (!strcmp(verb, "modify")) {
906 			ctlreq = G_PART_CTL_MODIFY;
907 			modifies = 1;
908 			mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX;
909 			oparms = G_PART_PARM_FLAGS | G_PART_PARM_LABEL |
910 			    G_PART_PARM_TYPE;
911 		} else if (!strcmp(verb, "move")) {
912 			ctlreq = G_PART_CTL_MOVE;
913 			modifies = 1;
914 			mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX;
915 			oparms = G_PART_PARM_FLAGS;
916 		}
917 		break;
918 	case 'q':
919 		if (!strcmp(verb, "query")) {
920 			ctlreq = G_PART_CTL_QUERY;
921 			mparms = G_PART_PARM_REQUEST | G_PART_PARM_RESPONSE;
922 			oparms = G_PART_PARM_FLAGS | G_PART_PARM_GEOM;
923 		}
924 		break;
925 	case 'r':
926 		if (!strcmp(verb, "recover")) {
927 			ctlreq = G_PART_CTL_RECOVER;
928 			modifies = 1;
929 			mparms = G_PART_PARM_GEOM;
930 			oparms = G_PART_PARM_FLAGS;
931 		} else if (!strcmp(verb, "resize")) {
932 			ctlreq = G_PART_CTL_RESIZE;
933 			modifies = 1;
934 			mparms = G_PART_PARM_GEOM | G_PART_PARM_INDEX;
935 			oparms = G_PART_PARM_FLAGS;
936 		}
937 		break;
938 	case 'u':
939 		if (!strcmp(verb, "undo")) {
940 			ctlreq = G_PART_CTL_UNDO;
941 			mparms = G_PART_PARM_GEOM;
942 			oparms = G_PART_PARM_FLAGS;
943 		}
944 		break;
945 	}
946 	if (ctlreq == G_PART_CTL_NONE) {
947 		gctl_error(req, "%d verb '%s'", EINVAL, verb);
948 		return;
949 	}
950 
951 	bzero(&gpp, sizeof(gpp));
952 	for (i = 0; i < req->narg; i++) {
953 		ap = &req->arg[i];
954 		parm = 0;
955 		switch (ap->name[0]) {
956 		case 'c':
957 			if (!strcmp(ap->name, "class"))
958 				continue;
959 			break;
960 		case 'e':
961 			if (!strcmp(ap->name, "entries"))
962 				parm = G_PART_PARM_ENTRIES;
963 			break;
964 		case 'f':
965 			if (!strcmp(ap->name, "flags"))
966 				parm = G_PART_PARM_FLAGS;
967 			break;
968 		case 'g':
969 			if (!strcmp(ap->name, "geom"))
970 				parm = G_PART_PARM_GEOM;
971 			break;
972 		case 'i':
973 			if (!strcmp(ap->name, "index"))
974 				parm = G_PART_PARM_INDEX;
975 			break;
976 		case 'l':
977 			if (!strcmp(ap->name, "label"))
978 				parm = G_PART_PARM_LABEL;
979 			break;
980 		case 'p':
981 			if (!strcmp(ap->name, "provider"))
982 				parm = G_PART_PARM_PROVIDER;
983 			break;
984 		case 'r':
985 			if (!strcmp(ap->name, "request"))
986 				parm = G_PART_PARM_REQUEST;
987 			else if (!strcmp(ap->name, "response"))
988 				parm = G_PART_PARM_RESPONSE;
989 			break;
990 		case 's':
991 			if (!strcmp(ap->name, "scheme"))
992 				parm = G_PART_PARM_SCHEME;
993 			else if (!strcmp(ap->name, "size"))
994 				parm = G_PART_PARM_SIZE;
995 			else if (!strcmp(ap->name, "start"))
996 				parm = G_PART_PARM_START;
997 			break;
998 		case 't':
999 			if (!strcmp(ap->name, "type"))
1000 				parm = G_PART_PARM_TYPE;
1001 			break;
1002 		case 'v':
1003 			if (!strcmp(ap->name, "verb"))
1004 				continue;
1005 			break;
1006 		}
1007 		if ((parm & (mparms | oparms)) == 0) {
1008 			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1009 			return;
1010 		}
1011 		p = gctl_get_asciiparam(req, ap->name);
1012 		if (p == NULL) {
1013 			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
1014 			return;
1015 		}
1016 		switch (parm) {
1017 		case G_PART_PARM_ENTRIES:
1018 			error = g_part_parm_uint(p, &gpp.gpp_entries);
1019 			break;
1020 		case G_PART_PARM_FLAGS:
1021 			error = g_part_parm_str(p, &gpp.gpp_flags);
1022 			break;
1023 		case G_PART_PARM_GEOM:
1024 			error = g_part_parm_geom(p, &gpp.gpp_geom);
1025 			break;
1026 		case G_PART_PARM_INDEX:
1027 			error = g_part_parm_uint(p, &gpp.gpp_index);
1028 			break;
1029 		case G_PART_PARM_LABEL:
1030 			error = g_part_parm_str(p, &gpp.gpp_label);
1031 			break;
1032 		case G_PART_PARM_PROVIDER:
1033 			error = g_part_parm_provider(p, &gpp.gpp_provider);
1034 			break;
1035 		case G_PART_PARM_REQUEST:
1036 			error = g_part_parm_str(p, &gpp.gpp_request);
1037 			break;
1038 		case G_PART_PARM_RESPONSE:
1039 			error = 0;	/* Write-only parameter. */
1040 			break;
1041 		case G_PART_PARM_SCHEME:
1042 			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
1043 			break;
1044 		case G_PART_PARM_SIZE:
1045 			error = g_part_parm_quad(p, &gpp.gpp_size);
1046 			break;
1047 		case G_PART_PARM_START:
1048 			error = g_part_parm_quad(p, &gpp.gpp_start);
1049 			break;
1050 		case G_PART_PARM_TYPE:
1051 			error = g_part_parm_str(p, &gpp.gpp_type);
1052 			break;
1053 		default:
1054 			error = EDOOFUS;
1055 			break;
1056 		}
1057 		if (error) {
1058 			gctl_error(req, "%d %s '%s'", error, ap->name, p);
1059 			return;
1060 		}
1061 		gpp.gpp_parms |= parm;
1062 	}
1063 	if ((gpp.gpp_parms & mparms) != mparms) {
1064 		parm = mparms - (gpp.gpp_parms & mparms);
1065 		gctl_error(req, "%d param '%x'", ENOATTR, parm);
1066 		return;
1067 	}
1068 
1069 	/* Obtain permissions if possible/necessary. */
1070 	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1071 		table = gpp.gpp_geom->softc;
1072 		if (table != NULL && !table->gpt_opened) {
1073 			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1074 			    1, 1, 1);
1075 			if (error) {
1076 				gctl_error(req, "%d geom '%s'", error,
1077 				    gpp.gpp_geom->name);
1078 				return;
1079 			}
1080 			table->gpt_opened = 1;
1081 		}
1082 	}
1083 
1084 	error = EDOOFUS;	/* Prevent bogus uninitialized warning. */
1085 	switch (ctlreq) {
1086 	case G_PART_CTL_NONE:
1087 		panic("%s", __func__);
1088 	case G_PART_CTL_ADD:
1089 		error = g_part_ctl_add(req, &gpp);
1090 		break;
1091 	case G_PART_CTL_COMMIT:
1092 		error = g_part_ctl_commit(req, &gpp);
1093 		break;
1094 	case G_PART_CTL_CREATE:
1095 		error = g_part_ctl_create(req, &gpp);
1096 		break;
1097 	case G_PART_CTL_DELETE:
1098 		error = g_part_ctl_delete(req, &gpp);
1099 		break;
1100 	case G_PART_CTL_DESTROY:
1101 		error = g_part_ctl_destroy(req, &gpp);
1102 		break;
1103 	case G_PART_CTL_MODIFY:
1104 		error = g_part_ctl_modify(req, &gpp);
1105 		break;
1106 	case G_PART_CTL_MOVE:
1107 		error = g_part_ctl_move(req, &gpp);
1108 		break;
1109 	case G_PART_CTL_QUERY:
1110 		error = g_part_ctl_query(req, &gpp);
1111 		break;
1112 	case G_PART_CTL_RECOVER:
1113 		error = g_part_ctl_recover(req, &gpp);
1114 		break;
1115 	case G_PART_CTL_RESIZE:
1116 		error = g_part_ctl_resize(req, &gpp);
1117 		break;
1118 	case G_PART_CTL_UNDO:
1119 		error = g_part_ctl_undo(req, &gpp);
1120 		break;
1121 	}
1122 }
1123 
1124 static int
1125 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1126     struct g_geom *gp)
1127 {
1128 
1129 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1130 	g_topology_assert();
1131 
1132 	g_part_wither(gp, EINVAL);
1133 	return (0);
1134 }
1135 
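/*
 * Taste a provider: attach a new geom with read access, refuse to
 * nest under a leaf partition, determine the nesting depth, probe
 * for a partitioning scheme, read the table and create a provider
 * for every entry found.  On any failure the geom is withered again.
 */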
1136 static struct g_geom *
1137 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1138 {
1139 	struct g_consumer *cp;
1140 	struct g_geom *gp;
1141 	struct g_part_entry *entry;
1142 	struct g_part_table *table;
1143 	int attr, depth;
1144 	int error;
1145 
1146 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1147 	g_topology_assert();
1148 
1149 	/*
1150 	 * Create a GEOM with a consumer and hook it up to the provider.
1151 	 * With that we become part of the topology. Obtain read access
1152 	 * to the provider.
1153 	 */
1154 	gp = g_new_geomf(mp, "%s", pp->name);
1155 	cp = g_new_consumer(gp);
1156 	error = g_attach(cp, pp);
1157 	if (error == 0)
1158 		error = g_access(cp, 1, 0, 0);
1159 	if (error != 0) {
1160 		g_part_wither(gp, error);
1161 		return (NULL);
1162 	}
1163 
1164 	g_topology_unlock();
1165 
1166 	/* Make sure we can nest and if so, determine our depth. */
1167 	error = g_getattr("PART::isleaf", cp, &attr);
1168 	if (!error && attr) {
1169 		error = ENODEV;
1170 		goto fail;
1171 	}
1172 	error = g_getattr("PART::depth", cp, &attr);
1173 	depth = (!error) ? attr + 1 : 0;
1174 
1175 	error = g_part_probe(gp, cp, depth);
1176 	if (error)
1177 		goto fail;
1178 
1179 	table = gp->softc;
1180 	error = G_PART_READ(table, cp);
1181 	if (error)
1182 		goto fail;
1183 
1184 	g_topology_lock();
1185 	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
1186 		g_part_new_provider(gp, table, entry);
1187 
1188 	g_access(cp, -1, 0, 0);
1189 	return (gp);
1190 
1191  fail:
1192 	g_topology_lock();
1193 	g_access(cp, -1, 0, 0);
1194 	g_part_wither(gp, error);
1195 	return (NULL);
1196 }
1197 
1198 /*
1199  * Geom methods.
1200  */
1201 
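/*
 * Translate an access request on a partition into an access request
 * on the underlying provider.  The write delta is added to the
 * exclusive delta so that writing to a partition also holds the
 * raw media write-exclusively.
 */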
1202 static int
1203 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1204 {
1205 	struct g_consumer *cp;
1206 
1207 	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1208 	    dw, de));
1209 
1210 	cp = LIST_FIRST(&pp->geom->consumer);
1211 
1212 	/* We always gain write-exclusive access. */
1213 	return (g_access(cp, dr, dw, dw + de));
1214 }
1215 
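/*
 * Dump configuration.  With a NULL indent a one-line summary for the
 * provider is emitted (index, offset, type); otherwise XML elements
 * are written for the provider (index, type, offset, length) or for
 * the geom (entries, first, last), with scheme-specific details
 * appended through G_PART_DUMPCONF.  Consumers have no configuration
 * of their own.
 */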
1216 static void
1217 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1218     struct g_consumer *cp, struct g_provider *pp)
1219 {
1220 	char buf[64];
1221 	struct g_part_entry *entry;
1222 	struct g_part_table *table;
1223 
1224 	KASSERT(sb != NULL && gp != NULL, (__func__));
1225 	table = gp->softc;
1226 
1227 	if (indent == NULL) {
1228 		KASSERT(cp == NULL && pp != NULL, (__func__));
1229 		entry = pp->private;
1230 		if (entry == NULL)
1231 			return;
1232 		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
1233 		    (uintmax_t)entry->gpe_offset,
1234 		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
1235 	} else if (cp != NULL) {	/* Consumer configuration. */
1236 		KASSERT(pp == NULL, (__func__));
1237 		/* none */
1238 	} else if (pp != NULL) {	/* Provider configuration. */
1239 		entry = pp->private;
1240 		if (entry == NULL)
1241 			return;
1242 		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
1243 		    entry->gpe_index);
1244 		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1245 		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
1246 		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
1247 		    (uintmax_t)entry->gpe_offset);
1248 		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
1249 		    (uintmax_t)pp->mediasize);
1250 		G_PART_DUMPCONF(table, entry, sb, indent);
1251 	} else {			/* Geom configuration. */
1252 		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
1253 		    table->gpt_entries);
1254 		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
1255 		    (uintmax_t)table->gpt_first);
1256 		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
1257 		    (uintmax_t)table->gpt_last);
1258 		G_PART_DUMPCONF(table, NULL, sb, indent);
1259 	}
1260 }
1261 
1262 static void
1263 g_part_orphan(struct g_consumer *cp)
1264 {
1265 	struct g_provider *pp;
1266 
1267 	pp = cp->provider;
1268 	KASSERT(pp != NULL, (__func__));
1269 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
1270 	g_topology_assert();
1271 
1272 	KASSERT(pp->error != 0, (__func__));
1273 	g_part_wither(cp->geom, pp->error);
1274 }
1275 
1276 static void
1277 g_part_spoiled(struct g_consumer *cp)
1278 {
1279 
1280 	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
1281 	g_topology_assert();
1282 
1283 	g_part_wither(cp->geom, ENXIO);
1284 }
1285 
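/*
 * I/O dispatch.  Read, write and delete requests are clipped to the
 * partition, shifted by the partition offset and passed down.
 * BIO_GETATTR answers PART::isleaf and PART::depth locally, rewrites
 * GEOM::kerneldump requests (rejecting partitions the scheme does
 * not allow dumping to) before forwarding them, and forwards other
 * attributes unchanged.  BIO_FLUSH is forwarded; anything else is
 * rejected with EOPNOTSUPP.
 */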
1286 static void
1287 g_part_start(struct bio *bp)
1288 {
1289 	struct bio *bp2;
1290 	struct g_consumer *cp;
1291 	struct g_geom *gp;
1292 	struct g_part_entry *entry;
1293 	struct g_part_table *table;
1294 	struct g_kerneldump *gkd;
1295 	struct g_provider *pp;
1296 	int attr;
1297 
1298 	pp = bp->bio_to;
1299 	gp = pp->geom;
1300 	table = gp->softc;
1301 	cp = LIST_FIRST(&gp->consumer);
1302 
1303 	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
1304 	    pp->name));
1305 
1306 	entry = pp->private;
1307 	if (entry == NULL) {
1308 		g_io_deliver(bp, ENXIO);
1309 		return;
1310 	}
1311 
1312 	switch(bp->bio_cmd) {
1313 	case BIO_DELETE:
1314 	case BIO_READ:
1315 	case BIO_WRITE:
1316 		if (bp->bio_offset >= pp->mediasize) {
1317 			g_io_deliver(bp, EIO);
1318 			return;
1319 		}
1320 		bp2 = g_clone_bio(bp);
1321 		if (bp2 == NULL) {
1322 			g_io_deliver(bp, ENOMEM);
1323 			return;
1324 		}
1325 		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
1326 			bp2->bio_length = pp->mediasize - bp2->bio_offset;
1327 		bp2->bio_done = g_std_done;
1328 		bp2->bio_offset += entry->gpe_offset;
1329 		g_io_request(bp2, cp);
1330 		return;
1331 	case BIO_FLUSH:
1332 		break;
1333 	case BIO_GETATTR:
1334 		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
1335 			/*
1336 			 * Check that the partition is suitable for kernel
1337 			 * dumps. Typically only swap partitions should be
1338 			 * used.
1339 			 */
1340 			if (!G_PART_DUMPTO(table, entry)) {
1341 				g_io_deliver(bp, ENXIO);
1342 				return;
1343 			}
1344 			gkd = (struct g_kerneldump *)bp->bio_data;
1345 			if (gkd->offset >= pp->mediasize) {
1346 				g_io_deliver(bp, EIO);
1347 				return;
1348 			}
1349 			if (gkd->offset + gkd->length > pp->mediasize)
1350 				gkd->length = pp->mediasize - gkd->offset;
1351 			gkd->offset += entry->gpe_offset;
1352 		} else if (!strcmp("PART::isleaf", bp->bio_attribute)) {
1353 			if (bp->bio_length != sizeof(int)) {
1354 				g_io_deliver(bp, EFAULT);
1355 				return;
1356 			}
1357 			attr = table->gpt_isleaf ? 1 : 0;
1358 			bcopy(&attr, bp->bio_data, sizeof(int));
1359 			bp->bio_completed = sizeof(int);
1360 			g_io_deliver(bp, 0);
1361 			return;
1362 		} else if (!strcmp("PART::depth", bp->bio_attribute)) {
1363 			if (bp->bio_length != sizeof(int)) {
1364 				g_io_deliver(bp, EFAULT);
1365 				return;
1366 			}
1367 			bcopy(&table->gpt_depth, bp->bio_data, sizeof(int));
1368 			bp->bio_completed = sizeof(int);
1369 			g_io_deliver(bp, 0);
1370 			return;
1371 		}
1372 		break;
1373 	default:
1374 		g_io_deliver(bp, EOPNOTSUPP);
1375 		return;
1376 	}
1377 
1378 	bp2 = g_clone_bio(bp);
1379 	if (bp2 == NULL) {
1380 		g_io_deliver(bp, ENOMEM);
1381 		return;
1382 	}
1383 	bp2->bio_done = g_std_done;
1384 	g_io_request(bp2, cp);
1385 }
1386