1 /*- 2 * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/diskmbr.h> 33 #include <sys/endian.h> 34 #include <sys/kernel.h> 35 #include <sys/kobj.h> 36 #include <sys/limits.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/mutex.h> 40 #include <sys/queue.h> 41 #include <sys/sbuf.h> 42 #include <sys/systm.h> 43 #include <sys/uuid.h> 44 #include <geom/geom.h> 45 #include <geom/geom_ctl.h> 46 #include <geom/part/g_part.h> 47 48 #include "g_part_if.h" 49 50 static kobj_method_t g_part_null_methods[] = { 51 { 0, 0 } 52 }; 53 54 static struct g_part_scheme g_part_null_scheme = { 55 "n/a", 56 g_part_null_methods, 57 sizeof(struct g_part_table), 58 }; 59 G_PART_SCHEME_DECLARE(g_part_null_scheme); 60 61 SET_DECLARE(g_part_scheme_set, struct g_part_scheme); 62 63 struct g_part_alias_list { 64 const char *lexeme; 65 enum g_part_alias alias; 66 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 67 { "efi", G_PART_ALIAS_EFI }, 68 { "freebsd", G_PART_ALIAS_FREEBSD }, 69 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 70 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 71 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 72 { "mbr", G_PART_ALIAS_MBR } 73 }; 74 75 /* 76 * The GEOM partitioning class. 77 */ 78 static g_ctl_req_t g_part_ctlreq; 79 static g_ctl_destroy_geom_t g_part_destroy_geom; 80 static g_taste_t g_part_taste; 81 82 static g_access_t g_part_access; 83 static g_dumpconf_t g_part_dumpconf; 84 static g_orphan_t g_part_orphan; 85 static g_spoiled_t g_part_spoiled; 86 static g_start_t g_part_start; 87 88 static struct g_class g_part_class = { 89 .name = "PART", 90 .version = G_VERSION, 91 /* Class methods. */ 92 .ctlreq = g_part_ctlreq, 93 .destroy_geom = g_part_destroy_geom, 94 .taste = g_part_taste, 95 /* Geom methods. 
*/ 96 .access = g_part_access, 97 .dumpconf = g_part_dumpconf, 98 .orphan = g_part_orphan, 99 .spoiled = g_part_spoiled, 100 .start = g_part_start, 101 }; 102 103 DECLARE_GEOM_CLASS(g_part_class, g_part); 104 105 enum g_part_ctl { 106 G_PART_CTL_NONE, 107 G_PART_CTL_ADD, 108 G_PART_CTL_COMMIT, 109 G_PART_CTL_CREATE, 110 G_PART_CTL_DELETE, 111 G_PART_CTL_DESTROY, 112 G_PART_CTL_MODIFY, 113 G_PART_CTL_MOVE, 114 G_PART_CTL_RECOVER, 115 G_PART_CTL_RESIZE, 116 G_PART_CTL_UNDO 117 }; 118 119 /* 120 * Support functions. 121 */ 122 123 static void g_part_wither(struct g_geom *, int); 124 125 const char * 126 g_part_alias_name(enum g_part_alias alias) 127 { 128 int i; 129 130 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 131 if (g_part_alias_list[i].alias != alias) 132 continue; 133 return (g_part_alias_list[i].lexeme); 134 } 135 136 return (NULL); 137 } 138 139 struct g_part_entry * 140 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 141 quad_t end) 142 { 143 struct g_part_entry *entry, *last; 144 145 last = NULL; 146 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 147 if (entry->gpe_index == index) 148 break; 149 if (entry->gpe_index > index) { 150 entry = NULL; 151 break; 152 } 153 last = entry; 154 } 155 if (entry == NULL) { 156 entry = g_malloc(table->gpt_scheme->gps_entrysz, 157 M_WAITOK | M_ZERO); 158 entry->gpe_index = index; 159 if (last == NULL) 160 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 161 else 162 LIST_INSERT_AFTER(last, entry, gpe_entry); 163 } 164 entry->gpe_start = start; 165 entry->gpe_end = end; 166 return (entry); 167 } 168 169 static void 170 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 171 struct g_part_entry *entry) 172 { 173 char buf[32]; 174 struct g_consumer *cp; 175 struct g_provider *pp; 176 177 cp = LIST_FIRST(&gp->consumer); 178 pp = cp->provider; 179 180 entry->gpe_offset = entry->gpe_start * pp->sectorsize; 181 182 if (entry->gpe_pp == NULL) { 183 entry->gpe_pp = g_new_providerf(gp, 
"%s%s", gp->name, 184 G_PART_NAME(table, entry, buf, sizeof(buf))); 185 entry->gpe_pp->private = entry; /* Close the circle. */ 186 } 187 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 188 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 189 pp->sectorsize; 190 entry->gpe_pp->sectorsize = pp->sectorsize; 191 entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE; 192 if (pp->stripesize > 0) { 193 entry->gpe_pp->stripesize = pp->stripesize; 194 entry->gpe_pp->stripeoffset = (pp->stripeoffset + 195 entry->gpe_offset) % pp->stripesize; 196 } 197 g_error_provider(entry->gpe_pp, 0); 198 } 199 200 static int 201 g_part_parm_geom(const char *p, struct g_geom **v) 202 { 203 struct g_geom *gp; 204 205 LIST_FOREACH(gp, &g_part_class.geom, geom) { 206 if (!strcmp(p, gp->name)) 207 break; 208 } 209 if (gp == NULL) 210 return (EINVAL); 211 *v = gp; 212 return (0); 213 } 214 215 static int 216 g_part_parm_provider(const char *p, struct g_provider **v) 217 { 218 struct g_provider *pp; 219 220 pp = g_provider_by_name(p); 221 if (pp == NULL) 222 return (EINVAL); 223 *v = pp; 224 return (0); 225 } 226 227 static int 228 g_part_parm_quad(const char *p, quad_t *v) 229 { 230 char *x; 231 quad_t q; 232 233 q = strtoq(p, &x, 0); 234 if (*x != '\0' || q < 0) 235 return (EINVAL); 236 *v = q; 237 return (0); 238 } 239 240 static int 241 g_part_parm_scheme(const char *p, struct g_part_scheme **v) 242 { 243 struct g_part_scheme **iter, *s; 244 245 s = NULL; 246 SET_FOREACH(iter, g_part_scheme_set) { 247 if ((*iter)->name == NULL) 248 continue; 249 if (!strcasecmp((*iter)->name, p)) { 250 s = *iter; 251 break; 252 } 253 } 254 if (s == NULL) 255 return (EINVAL); 256 *v = s; 257 return (0); 258 } 259 260 static int 261 g_part_parm_str(const char *p, const char **v) 262 { 263 264 if (p[0] == '\0') 265 return (EINVAL); 266 *v = p; 267 return (0); 268 } 269 270 static int 271 g_part_parm_uint(const char *p, u_int *v) 272 { 273 char *x; 274 long l; 275 276 l = 
strtol(p, &x, 0); 277 if (*x != '\0' || l < 0 || l > INT_MAX) 278 return (EINVAL); 279 *v = (unsigned int)l; 280 return (0); 281 } 282 283 static int 284 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 285 { 286 struct g_part_scheme **iter, *scheme; 287 struct g_part_table *table; 288 int pri, probe; 289 290 table = gp->softc; 291 scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme; 292 pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) : 293 INT_MIN; 294 if (pri == 0) 295 goto done; 296 if (pri > 0) { /* error */ 297 scheme = &g_part_null_scheme; 298 pri = INT_MIN; 299 } 300 301 SET_FOREACH(iter, g_part_scheme_set) { 302 if ((*iter) == &g_part_null_scheme) 303 continue; 304 table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM, 305 M_WAITOK); 306 table->gpt_gp = gp; 307 table->gpt_scheme = *iter; 308 table->gpt_depth = depth; 309 probe = G_PART_PROBE(table, cp); 310 if (probe <= 0 && probe > pri) { 311 pri = probe; 312 scheme = *iter; 313 if (gp->softc != NULL) 314 kobj_delete((kobj_t)gp->softc, M_GEOM); 315 gp->softc = table; 316 if (pri == 0) 317 goto done; 318 } else 319 kobj_delete((kobj_t)table, M_GEOM); 320 } 321 322 done: 323 return ((scheme == &g_part_null_scheme) ? ENXIO : 0); 324 } 325 326 /* 327 * Control request functions. 
328 */ 329 330 static int 331 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 332 { 333 char buf[32]; 334 struct g_geom *gp; 335 struct g_provider *pp; 336 struct g_part_entry *delent, *last, *entry; 337 struct g_part_table *table; 338 struct sbuf *sb; 339 quad_t end; 340 unsigned int index; 341 int error; 342 343 gp = gpp->gpp_geom; 344 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 345 g_topology_assert(); 346 347 pp = LIST_FIRST(&gp->consumer)->provider; 348 table = gp->softc; 349 end = gpp->gpp_start + gpp->gpp_size - 1; 350 351 if (gpp->gpp_start < table->gpt_first || 352 gpp->gpp_start > table->gpt_last) { 353 gctl_error(req, "%d start '%jd'", EINVAL, 354 (intmax_t)gpp->gpp_start); 355 return (EINVAL); 356 } 357 if (end < gpp->gpp_start || end > table->gpt_last) { 358 gctl_error(req, "%d size '%jd'", EINVAL, 359 (intmax_t)gpp->gpp_size); 360 return (EINVAL); 361 } 362 if (gpp->gpp_index > table->gpt_entries) { 363 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 364 return (EINVAL); 365 } 366 367 delent = last = NULL; 368 index = (gpp->gpp_index > 0) ? 
gpp->gpp_index : 1; 369 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 370 if (entry->gpe_deleted) { 371 if (entry->gpe_index == index) 372 delent = entry; 373 continue; 374 } 375 if (entry->gpe_index == index) { 376 index = entry->gpe_index + 1; 377 last = entry; 378 } 379 if (gpp->gpp_start >= entry->gpe_start && 380 gpp->gpp_start <= entry->gpe_end) { 381 gctl_error(req, "%d start '%jd'", ENOSPC, 382 (intmax_t)gpp->gpp_start); 383 return (ENOSPC); 384 } 385 if (end >= entry->gpe_start && end <= entry->gpe_end) { 386 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 387 return (ENOSPC); 388 } 389 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 390 gctl_error(req, "%d size '%jd'", ENOSPC, 391 (intmax_t)gpp->gpp_size); 392 return (ENOSPC); 393 } 394 } 395 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 396 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 397 return (EEXIST); 398 } 399 400 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 401 M_WAITOK | M_ZERO) : delent; 402 entry->gpe_index = index; 403 entry->gpe_start = gpp->gpp_start; 404 entry->gpe_end = end; 405 error = G_PART_ADD(table, entry, gpp); 406 if (error) { 407 gctl_error(req, "%d", error); 408 if (delent == NULL) 409 g_free(entry); 410 return (error); 411 } 412 if (delent == NULL) { 413 if (last == NULL) 414 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 415 else 416 LIST_INSERT_AFTER(last, entry, gpe_entry); 417 entry->gpe_created = 1; 418 } else { 419 entry->gpe_deleted = 0; 420 entry->gpe_modified = 1; 421 } 422 g_part_new_provider(gp, table, entry); 423 424 /* Provide feedback if so requested. 
*/ 425 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 426 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 427 sbuf_printf(sb, "%s%s added\n", gp->name, 428 G_PART_NAME(table, entry, buf, sizeof(buf))); 429 sbuf_finish(sb); 430 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 431 sbuf_delete(sb); 432 } 433 return (0); 434 } 435 436 static int 437 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 438 { 439 struct g_consumer *cp; 440 struct g_geom *gp; 441 struct g_provider *pp; 442 struct g_part_entry *entry, *tmp; 443 struct g_part_table *table; 444 char *buf; 445 int error, i; 446 447 gp = gpp->gpp_geom; 448 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 449 g_topology_assert(); 450 451 table = gp->softc; 452 if (!table->gpt_opened) { 453 gctl_error(req, "%d", EPERM); 454 return (EPERM); 455 } 456 457 cp = LIST_FIRST(&gp->consumer); 458 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 459 pp = cp->provider; 460 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 461 while (table->gpt_smhead != 0) { 462 i = ffs(table->gpt_smhead) - 1; 463 error = g_write_data(cp, i * pp->sectorsize, buf, 464 pp->sectorsize); 465 if (error) { 466 g_free(buf); 467 goto fail; 468 } 469 table->gpt_smhead &= ~(1 << i); 470 } 471 while (table->gpt_smtail != 0) { 472 i = ffs(table->gpt_smtail) - 1; 473 error = g_write_data(cp, pp->mediasize - (i + 1) * 474 pp->sectorsize, buf, pp->sectorsize); 475 if (error) { 476 g_free(buf); 477 goto fail; 478 } 479 table->gpt_smtail &= ~(1 << i); 480 } 481 g_free(buf); 482 } 483 484 if (table->gpt_scheme == &g_part_null_scheme) { 485 g_access(cp, -1, -1, -1); 486 g_part_wither(gp, ENXIO); 487 return (0); 488 } 489 490 error = G_PART_WRITE(table, cp); 491 if (error) 492 goto fail; 493 494 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 495 if (!entry->gpe_deleted) { 496 entry->gpe_created = 0; 497 entry->gpe_modified = 0; 498 continue; 499 } 500 LIST_REMOVE(entry, gpe_entry); 501 g_free(entry); 502 } 
503 table->gpt_created = 0; 504 table->gpt_opened = 0; 505 g_access(cp, -1, -1, -1); 506 return (0); 507 508 fail: 509 gctl_error(req, "%d", error); 510 return (error); 511 } 512 513 static int 514 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 515 { 516 struct g_consumer *cp; 517 struct g_geom *gp; 518 struct g_provider *pp; 519 struct g_part_scheme *scheme; 520 struct g_part_table *null, *table; 521 struct sbuf *sb; 522 int attr, error; 523 524 pp = gpp->gpp_provider; 525 scheme = gpp->gpp_scheme; 526 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 527 g_topology_assert(); 528 529 /* Check that there isn't already a g_part geom on the provider. */ 530 error = g_part_parm_geom(pp->name, &gp); 531 if (!error) { 532 null = gp->softc; 533 if (null->gpt_scheme != &g_part_null_scheme) { 534 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 535 return (EEXIST); 536 } 537 } else 538 null = NULL; 539 540 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 541 (gpp->gpp_entries < scheme->gps_minent || 542 gpp->gpp_entries > scheme->gps_maxent)) { 543 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 544 return (EINVAL); 545 } 546 547 if (null == NULL) 548 gp = g_new_geomf(&g_part_class, "%s", pp->name); 549 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 550 M_WAITOK); 551 table = gp->softc; 552 table->gpt_gp = gp; 553 table->gpt_scheme = gpp->gpp_scheme; 554 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 
555 gpp->gpp_entries : scheme->gps_minent; 556 LIST_INIT(&table->gpt_entry); 557 if (null == NULL) { 558 cp = g_new_consumer(gp); 559 error = g_attach(cp, pp); 560 if (error == 0) 561 error = g_access(cp, 1, 1, 1); 562 if (error != 0) { 563 g_part_wither(gp, error); 564 gctl_error(req, "%d geom '%s'", error, pp->name); 565 return (error); 566 } 567 table->gpt_opened = 1; 568 } else { 569 cp = LIST_FIRST(&gp->consumer); 570 table->gpt_opened = null->gpt_opened; 571 table->gpt_smhead = null->gpt_smhead; 572 table->gpt_smtail = null->gpt_smtail; 573 } 574 575 g_topology_unlock(); 576 577 /* Make sure we can nest and if so, determine our depth. */ 578 error = g_getattr("PART::isleaf", cp, &attr); 579 if (!error && attr) { 580 error = ENODEV; 581 goto fail; 582 } 583 error = g_getattr("PART::depth", cp, &attr); 584 table->gpt_depth = (!error) ? attr + 1 : 0; 585 586 error = G_PART_CREATE(table, gpp); 587 if (error) 588 goto fail; 589 590 g_topology_lock(); 591 592 table->gpt_created = 1; 593 if (null != NULL) 594 kobj_delete((kobj_t)null, M_GEOM); 595 596 /* 597 * Support automatic commit by filling in the gpp_geom 598 * parameter. 599 */ 600 gpp->gpp_parms |= G_PART_PARM_GEOM; 601 gpp->gpp_geom = gp; 602 603 /* Provide feedback if so requested. 
*/ 604 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 605 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 606 sbuf_printf(sb, "%s created\n", gp->name); 607 sbuf_finish(sb); 608 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 609 sbuf_delete(sb); 610 } 611 return (0); 612 613 fail: 614 g_topology_lock(); 615 if (null == NULL) { 616 g_access(cp, -1, -1, -1); 617 g_part_wither(gp, error); 618 } else { 619 kobj_delete((kobj_t)gp->softc, M_GEOM); 620 gp->softc = null; 621 } 622 gctl_error(req, "%d provider", error); 623 return (error); 624 } 625 626 static int 627 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 628 { 629 char buf[32]; 630 struct g_geom *gp; 631 struct g_provider *pp; 632 struct g_part_entry *entry; 633 struct g_part_table *table; 634 struct sbuf *sb; 635 636 gp = gpp->gpp_geom; 637 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 638 g_topology_assert(); 639 640 table = gp->softc; 641 642 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 643 if (entry->gpe_deleted) 644 continue; 645 if (entry->gpe_index == gpp->gpp_index) 646 break; 647 } 648 if (entry == NULL) { 649 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 650 return (ENOENT); 651 } 652 653 pp = entry->gpe_pp; 654 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 655 gctl_error(req, "%d", EBUSY); 656 return (EBUSY); 657 } 658 659 pp->private = NULL; 660 entry->gpe_pp = NULL; 661 if (entry->gpe_created) { 662 LIST_REMOVE(entry, gpe_entry); 663 g_free(entry); 664 } else { 665 entry->gpe_modified = 0; 666 entry->gpe_deleted = 1; 667 } 668 g_wither_provider(pp, ENXIO); 669 670 /* Provide feedback if so requested. 
*/ 671 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 672 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 673 sbuf_printf(sb, "%s%s deleted\n", gp->name, 674 G_PART_NAME(table, entry, buf, sizeof(buf))); 675 sbuf_finish(sb); 676 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 677 sbuf_delete(sb); 678 } 679 return (0); 680 } 681 682 static int 683 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 684 { 685 struct g_geom *gp; 686 struct g_part_entry *entry; 687 struct g_part_table *null, *table; 688 struct sbuf *sb; 689 int error; 690 691 gp = gpp->gpp_geom; 692 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 693 g_topology_assert(); 694 695 table = gp->softc; 696 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 697 if (entry->gpe_deleted) 698 continue; 699 gctl_error(req, "%d", EBUSY); 700 return (EBUSY); 701 } 702 703 error = G_PART_DESTROY(table, gpp); 704 if (error) { 705 gctl_error(req, "%d", error); 706 return (error); 707 } 708 709 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 710 M_WAITOK); 711 null = gp->softc; 712 null->gpt_gp = gp; 713 null->gpt_scheme = &g_part_null_scheme; 714 LIST_INIT(&null->gpt_entry); 715 null->gpt_depth = table->gpt_depth; 716 null->gpt_opened = table->gpt_opened; 717 null->gpt_smhead = table->gpt_smhead; 718 null->gpt_smtail = table->gpt_smtail; 719 720 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 721 LIST_REMOVE(entry, gpe_entry); 722 g_free(entry); 723 } 724 kobj_delete((kobj_t)table, M_GEOM); 725 726 /* Provide feedback if so requested. 
*/ 727 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 728 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 729 sbuf_printf(sb, "%s destroyed\n", gp->name); 730 sbuf_finish(sb); 731 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 732 sbuf_delete(sb); 733 } 734 return (0); 735 } 736 737 static int 738 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 739 { 740 char buf[32]; 741 struct g_geom *gp; 742 struct g_part_entry *entry; 743 struct g_part_table *table; 744 struct sbuf *sb; 745 int error; 746 747 gp = gpp->gpp_geom; 748 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 749 g_topology_assert(); 750 751 table = gp->softc; 752 753 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 754 if (entry->gpe_deleted) 755 continue; 756 if (entry->gpe_index == gpp->gpp_index) 757 break; 758 } 759 if (entry == NULL) { 760 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 761 return (ENOENT); 762 } 763 764 error = G_PART_MODIFY(table, entry, gpp); 765 if (error) { 766 gctl_error(req, "%d", error); 767 return (error); 768 } 769 770 if (!entry->gpe_created) 771 entry->gpe_modified = 1; 772 773 /* Provide feedback if so requested. 
*/ 774 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 775 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 776 sbuf_printf(sb, "%s%s modified\n", gp->name, 777 G_PART_NAME(table, entry, buf, sizeof(buf))); 778 sbuf_finish(sb); 779 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 780 sbuf_delete(sb); 781 } 782 return (0); 783 } 784 785 static int 786 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 787 { 788 gctl_error(req, "%d verb 'move'", ENOSYS); 789 return (ENOSYS); 790 } 791 792 static int 793 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 794 { 795 gctl_error(req, "%d verb 'recover'", ENOSYS); 796 return (ENOSYS); 797 } 798 799 static int 800 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 801 { 802 gctl_error(req, "%d verb 'resize'", ENOSYS); 803 return (ENOSYS); 804 } 805 806 static int 807 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 808 { 809 struct g_consumer *cp; 810 struct g_provider *pp; 811 struct g_geom *gp; 812 struct g_part_entry *entry, *tmp; 813 struct g_part_table *table; 814 int error, reprobe; 815 816 gp = gpp->gpp_geom; 817 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 818 g_topology_assert(); 819 820 table = gp->softc; 821 if (!table->gpt_opened) { 822 gctl_error(req, "%d", EPERM); 823 return (EPERM); 824 } 825 826 cp = LIST_FIRST(&gp->consumer); 827 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 828 entry->gpe_modified = 0; 829 if (entry->gpe_created) { 830 pp = entry->gpe_pp; 831 pp->private = NULL; 832 entry->gpe_pp = NULL; 833 g_wither_provider(pp, ENXIO); 834 entry->gpe_deleted = 1; 835 } 836 if (entry->gpe_deleted) { 837 LIST_REMOVE(entry, gpe_entry); 838 g_free(entry); 839 } 840 } 841 842 g_topology_unlock(); 843 844 reprobe = (table->gpt_scheme == &g_part_null_scheme || 845 table->gpt_created) ? 
1 : 0; 846 847 if (reprobe) { 848 if (!LIST_EMPTY(&table->gpt_entry)) { 849 error = EBUSY; 850 goto fail; 851 } 852 error = g_part_probe(gp, cp, table->gpt_depth); 853 if (error) { 854 g_topology_lock(); 855 g_access(cp, -1, -1, -1); 856 g_part_wither(gp, error); 857 return (0); 858 } 859 table = gp->softc; 860 } 861 862 error = G_PART_READ(table, cp); 863 if (error) 864 goto fail; 865 866 g_topology_lock(); 867 868 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) 869 g_part_new_provider(gp, table, entry); 870 871 table->gpt_opened = 0; 872 g_access(cp, -1, -1, -1); 873 return (0); 874 875 fail: 876 g_topology_lock(); 877 gctl_error(req, "%d", error); 878 return (error); 879 } 880 881 static void 882 g_part_wither(struct g_geom *gp, int error) 883 { 884 struct g_part_entry *entry; 885 struct g_part_table *table; 886 887 table = gp->softc; 888 if (table != NULL) { 889 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 890 LIST_REMOVE(entry, gpe_entry); 891 g_free(entry); 892 } 893 if (gp->softc != NULL) { 894 kobj_delete((kobj_t)gp->softc, M_GEOM); 895 gp->softc = NULL; 896 } 897 } 898 g_wither_geom(gp, error); 899 } 900 901 /* 902 * Class methods. 
903 */ 904 905 static void 906 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 907 { 908 struct g_part_parms gpp; 909 struct g_part_table *table; 910 struct gctl_req_arg *ap; 911 const char *p; 912 enum g_part_ctl ctlreq; 913 unsigned int i, mparms, oparms, parm; 914 int auto_commit, close_on_error; 915 int error, modifies; 916 917 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 918 g_topology_assert(); 919 920 ctlreq = G_PART_CTL_NONE; 921 modifies = 1; 922 mparms = 0; 923 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 924 switch (*verb) { 925 case 'a': 926 if (!strcmp(verb, "add")) { 927 ctlreq = G_PART_CTL_ADD; 928 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 929 G_PART_PARM_START | G_PART_PARM_TYPE; 930 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 931 } 932 break; 933 case 'c': 934 if (!strcmp(verb, "commit")) { 935 ctlreq = G_PART_CTL_COMMIT; 936 mparms |= G_PART_PARM_GEOM; 937 modifies = 0; 938 } else if (!strcmp(verb, "create")) { 939 ctlreq = G_PART_CTL_CREATE; 940 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 941 oparms |= G_PART_PARM_ENTRIES; 942 } 943 break; 944 case 'd': 945 if (!strcmp(verb, "delete")) { 946 ctlreq = G_PART_CTL_DELETE; 947 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 948 } else if (!strcmp(verb, "destroy")) { 949 ctlreq = G_PART_CTL_DESTROY; 950 mparms |= G_PART_PARM_GEOM; 951 } 952 break; 953 case 'm': 954 if (!strcmp(verb, "modify")) { 955 ctlreq = G_PART_CTL_MODIFY; 956 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 957 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 958 } else if (!strcmp(verb, "move")) { 959 ctlreq = G_PART_CTL_MOVE; 960 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 961 } 962 break; 963 case 'r': 964 if (!strcmp(verb, "recover")) { 965 ctlreq = G_PART_CTL_RECOVER; 966 mparms |= G_PART_PARM_GEOM; 967 } else if (!strcmp(verb, "resize")) { 968 ctlreq = G_PART_CTL_RESIZE; 969 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 970 } 
971 break; 972 case 'u': 973 if (!strcmp(verb, "undo")) { 974 ctlreq = G_PART_CTL_UNDO; 975 mparms |= G_PART_PARM_GEOM; 976 modifies = 0; 977 } 978 break; 979 } 980 if (ctlreq == G_PART_CTL_NONE) { 981 gctl_error(req, "%d verb '%s'", EINVAL, verb); 982 return; 983 } 984 985 bzero(&gpp, sizeof(gpp)); 986 for (i = 0; i < req->narg; i++) { 987 ap = &req->arg[i]; 988 parm = 0; 989 switch (ap->name[0]) { 990 case 'c': 991 if (!strcmp(ap->name, "class")) 992 continue; 993 break; 994 case 'e': 995 if (!strcmp(ap->name, "entries")) 996 parm = G_PART_PARM_ENTRIES; 997 break; 998 case 'f': 999 if (!strcmp(ap->name, "flags")) 1000 parm = G_PART_PARM_FLAGS; 1001 break; 1002 case 'g': 1003 if (!strcmp(ap->name, "geom")) 1004 parm = G_PART_PARM_GEOM; 1005 break; 1006 case 'i': 1007 if (!strcmp(ap->name, "index")) 1008 parm = G_PART_PARM_INDEX; 1009 break; 1010 case 'l': 1011 if (!strcmp(ap->name, "label")) 1012 parm = G_PART_PARM_LABEL; 1013 break; 1014 case 'o': 1015 if (!strcmp(ap->name, "output")) 1016 parm = G_PART_PARM_OUTPUT; 1017 break; 1018 case 'p': 1019 if (!strcmp(ap->name, "provider")) 1020 parm = G_PART_PARM_PROVIDER; 1021 break; 1022 case 's': 1023 if (!strcmp(ap->name, "scheme")) 1024 parm = G_PART_PARM_SCHEME; 1025 else if (!strcmp(ap->name, "size")) 1026 parm = G_PART_PARM_SIZE; 1027 else if (!strcmp(ap->name, "start")) 1028 parm = G_PART_PARM_START; 1029 break; 1030 case 't': 1031 if (!strcmp(ap->name, "type")) 1032 parm = G_PART_PARM_TYPE; 1033 break; 1034 case 'v': 1035 if (!strcmp(ap->name, "verb")) 1036 continue; 1037 else if (!strcmp(ap->name, "version")) 1038 parm = G_PART_PARM_VERSION; 1039 break; 1040 } 1041 if ((parm & (mparms | oparms)) == 0) { 1042 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1043 return; 1044 } 1045 p = gctl_get_asciiparam(req, ap->name); 1046 if (p == NULL) { 1047 gctl_error(req, "%d param '%s'", ENOATTR, ap->name); 1048 return; 1049 } 1050 switch (parm) { 1051 case G_PART_PARM_ENTRIES: 1052 error = g_part_parm_uint(p, 
&gpp.gpp_entries); 1053 break; 1054 case G_PART_PARM_FLAGS: 1055 if (p[0] == '\0') 1056 continue; 1057 error = g_part_parm_str(p, &gpp.gpp_flags); 1058 break; 1059 case G_PART_PARM_GEOM: 1060 error = g_part_parm_geom(p, &gpp.gpp_geom); 1061 break; 1062 case G_PART_PARM_INDEX: 1063 error = g_part_parm_uint(p, &gpp.gpp_index); 1064 break; 1065 case G_PART_PARM_LABEL: 1066 /* An empty label is always valid. */ 1067 gpp.gpp_label = p; 1068 error = 0; 1069 break; 1070 case G_PART_PARM_OUTPUT: 1071 error = 0; /* Write-only parameter */ 1072 break; 1073 case G_PART_PARM_PROVIDER: 1074 error = g_part_parm_provider(p, &gpp.gpp_provider); 1075 break; 1076 case G_PART_PARM_SCHEME: 1077 error = g_part_parm_scheme(p, &gpp.gpp_scheme); 1078 break; 1079 case G_PART_PARM_SIZE: 1080 error = g_part_parm_quad(p, &gpp.gpp_size); 1081 break; 1082 case G_PART_PARM_START: 1083 error = g_part_parm_quad(p, &gpp.gpp_start); 1084 break; 1085 case G_PART_PARM_TYPE: 1086 error = g_part_parm_str(p, &gpp.gpp_type); 1087 break; 1088 case G_PART_PARM_VERSION: 1089 error = g_part_parm_uint(p, &gpp.gpp_version); 1090 break; 1091 default: 1092 error = EDOOFUS; 1093 break; 1094 } 1095 if (error) { 1096 gctl_error(req, "%d %s '%s'", error, ap->name, p); 1097 return; 1098 } 1099 gpp.gpp_parms |= parm; 1100 } 1101 if ((gpp.gpp_parms & mparms) != mparms) { 1102 parm = mparms - (gpp.gpp_parms & mparms); 1103 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1104 return; 1105 } 1106 1107 /* Obtain permissions if possible/necessary. */ 1108 close_on_error = 0; 1109 table = NULL; /* Suppress uninit. warning. 
*/
	/*
	 * A modifying verb needs the table opened (r1w1e1 on our
	 * consumer) before it may be applied; skip if an earlier verb
	 * already opened it.
	 */
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			/* Remember to drop the access counts on failure. */
			close_on_error = 1;
		}
	}

	/* Dispatch the request to the per-verb handler. */
	error = EDOOFUS;	/* Prevent bogus uninit. warning. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		/* The 'C' flag requests an immediate commit of the change. */
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

	/* Undo the open above when the verb (or auto-commit) failed. */
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

/*
 * Class method: handle a "destroy geom" request by unconditionally
 * withering the geom with EINVAL; real table destruction goes through
 * the ctlreq verbs above.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}

/*
 * Taste method: probe the provider for a partition table we recognize
 * and, if found, create a provider for every partition entry.
 * Returns the new geom on success, NULL otherwise.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	g_topology_unlock();

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		/* The scheme below us does not allow nesting. */
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	/* No PART::depth attribute: we sit directly on the disk. */
	depth = (!error) ? attr + 1 : 0;

	/* Find a scheme that recognizes the on-disk metadata. */
	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;
	/* Have the scheme read the partition table proper. */
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Create a provider for each partition entry. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	/* Drop the read access taken above; we keep the attachment. */
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	/* Re-take the topology lock dropped for the probe/read phase. */
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}

/*
 * Geom methods.
 */

/*
 * Access method: pass the deltas down to our consumer, converting any
 * write access into write-exclusive access on the underlying provider
 * so nothing else modifies the disk under our feet.
 */
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

/*
 * Dumpconf method: emit our configuration, either as a terse one-liner
 * (indent == NULL) or as XML elements for the consumer, a provider
 * (i.e. a partition entry) or the geom (the table) as a whole.
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		/* Terse provider line: index, offset and type. */
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		/* Let the scheme add its per-entry elements. */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		/* Let the scheme add its per-table elements. */
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

/*
 * Orphan method: our consumer's provider went away; propagate its
 * error and wither the whole partitioning geom.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Orphaning implies the provider is in error. */
	KASSERT(pp->error != 0, (__func__));
	g_part_wither(cp->geom, pp->error);
}

/*
 * Spoiled method: someone wrote to the underlying provider behind our
 * back; our cached metadata is suspect, so wither with ENXIO.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}

/*
 * Start method: I/O entry point. Translate a bio aimed at one of our
 * partition providers into a bio for the underlying consumer, shifting
 * the offset by the partition's start and clipping at its media size.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	int attr;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* No entry: the partition is gone (e.g. deleted under us). */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the request at the end of the partition. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Rebase the offset onto the underlying provider. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			/* Clip and rebase the dump extent, then pass down. */
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		} else if (!strcmp("PART::isleaf", bp->bio_attribute)) {
			/* Answered locally: may another PART nest below us? */
			if (bp->bio_length != sizeof(int)) {
				g_io_deliver(bp, EFAULT);
				return;
			}
			attr = table->gpt_isleaf ? 1 : 0;
			bcopy(&attr, bp->bio_data, sizeof(int));
			bp->bio_completed = sizeof(int);
			g_io_deliver(bp, 0);
			return;
		} else if (!strcmp("PART::depth", bp->bio_attribute)) {
			/* Answered locally: our nesting depth. */
			if (bp->bio_length != sizeof(int)) {
				g_io_deliver(bp, EFAULT);
				return;
			}
			bcopy(&table->gpt_depth, bp->bio_data, sizeof(int));
			bp->bio_completed = sizeof(int);
			g_io_deliver(bp, 0);
			return;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* BIO_FLUSH and unhandled BIO_GETATTRs are passed down unchanged. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}