/*-
 * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/diskmbr.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"n/a",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
G_PART_SCHEME_DECLARE(g_part_null_scheme);

SET_DECLARE(g_part_scheme_set, struct g_part_scheme);

struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "mbr", G_PART_ALIAS_MBR }
};

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);

enum g_part_ctl {
	G_PART_CTL_NONE,
	G_PART_CTL_ADD,
	G_PART_CTL_COMMIT,
	G_PART_CTL_CREATE,
	G_PART_CTL_DELETE,
	G_PART_CTL_DESTROY,
	G_PART_CTL_MODIFY,
	G_PART_CTL_MOVE,
	G_PART_CTL_RECOVER,
	G_PART_CTL_RESIZE,
	G_PART_CTL_UNDO
};

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}
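/*
 * Find, for a given number of sectors per track, the head count that
 * yields the largest cylinder/head/sector geometry still addressable
 * with a cylinder number of at most 1023.  The addressable capacity
 * and head count are returned through *bestchs and *bestheads.
 */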
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 ||
	    sectors < 1 || sectors > 63 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 ||
	    heads < 1 || heads > 255) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
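/*
 * Return the entry with the given index, creating one and inserting it
 * in index order if it doesn't exist yet.  The start and end sectors of
 * the entry are (re)set in either case.
 */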
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	}
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	char buf[32];
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	entry->gpe_offset = entry->gpe_start * pp->sectorsize;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	if (pp->stripesize > 0) {
		entry->gpe_pp->stripesize = pp->stripesize;
		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
		    entry->gpe_offset) % pp->stripesize;
	}
	g_error_provider(entry->gpe_pp, 0);
}

static int
g_part_parm_geom(const char *p, struct g_geom **v)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if (!strcmp(p, gp->name))
			break;
	}
	if (gp == NULL)
		return (EINVAL);
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(const char *p, struct g_provider **v)
{
	struct g_provider *pp;

	pp = g_provider_by_name(p);
	if (pp == NULL)
		return (EINVAL);
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(const char *p, quad_t *v)
{
	char *x;
	quad_t q;

	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0)
		return (EINVAL);
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(const char *p, struct g_part_scheme **v)
{
	struct g_part_scheme **iter, *s;

	s = NULL;
	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter)->name == NULL)
			continue;
		if (!strcasecmp((*iter)->name, p)) {
			s = *iter;
			break;
		}
	}
	if (s == NULL)
		return (EINVAL);
	*v = s;
	return (0);
}

static int
g_part_parm_str(const char *p, const char **v)
{

	if (p[0] == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}

static int
g_part_parm_uint(const char *p, u_int *v)
{
	char *x;
	long l;

	l = strtol(p, &x, 0);
	if (*x != '\0' || l < 0 || l > INT_MAX)
		return (EINVAL);
	*v = (unsigned int)l;
	return (0);
}
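/*
 * Probe the provider with every registered partitioning scheme and
 * attach the table of the best match (i.e. the probe that returned
 * the highest non-positive priority) to the geom.  Returns ENXIO when
 * no scheme recognizes the provider.
 */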
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme **iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme;
	pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) :
	    INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = &g_part_null_scheme;
		pri = INT_MIN;
	}

	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter) == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = *iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = *iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == &g_part_null_scheme) ? ENXIO : 0);
}

/*
 * Control request functions.
 */
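/*
 * Handle the 'add' verb: create a new partition entry within the given
 * start/end range, after checking that the range lies within the table
 * and doesn't overlap any existing (non-deleted) entry.  If no index
 * was requested, the first free index is used.
 */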
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s added\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
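/*
 * Handle the 'create' verb: put a new partition table of the requested
 * scheme on the provider.  An existing null-scheme table (left behind
 * by 'destroy') is replaced in place; otherwise a new geom is created
 * and attached to the provider.
 */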
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/* If we're nested, get the absolute sector offset on disk. */
	if (table->gpt_depth) {
		error = g_getattr("PART::offset", cp, &attr);
		if (error)
			goto fail;
		table->gpt_offset = attr;
	}

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
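/*
 * Handle the 'delete' verb: remove the entry with the given index,
 * provided its provider isn't open.  Entries created since the last
 * commit are freed outright; pre-existing entries are only marked
 * deleted, to be freed when the change is committed.
 */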
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s deleted\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
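/*
 * Handle the 'modify' verb: change the type and/or label of an
 * existing entry through the scheme-specific G_PART_MODIFY method.
 */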
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[32];
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
		sbuf_printf(sb, "%s%s modified\n", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'recover'", ENOSYS);
	return (ENOSYS);
}

static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'resize'", ENOSYS);
	return (ENOSYS);
}
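/*
 * Handle the 'undo' verb: discard all uncommitted changes by dropping
 * the in-memory entries and re-reading the on-disk table.  If the
 * table was newly created (or already replaced by the null scheme),
 * the provider is re-probed from scratch.
 */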
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */
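/*
 * Dispatch a control request: map the verb to a G_PART_CTL_* code,
 * parse and validate the request parameters, obtain write access to
 * the consumer when the verb modifies the table, and invoke the
 * matching handler.  A 'C' in the flags parameter requests an
 * automatic commit afterwards.
 */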
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	const char *p;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			break;
		case 'g':
			if (!strcmp(ap->name, "geom"))
				parm = G_PART_PARM_GEOM;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 'p':
			if (!strcmp(ap->name, "provider"))
				parm = G_PART_PARM_PROVIDER;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		p = gctl_get_asciiparam(req, ap->name);
		if (p == NULL) {
			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_uint(p, &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			if (p[0] == '\0')
				continue;
			error = g_part_parm_str(p, &gpp.gpp_flags);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(p, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_uint(p, &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			/* An empty label is always valid. */
			gpp.gpp_label = p;
			error = 0;
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(p, &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(p, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(p, &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(p, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint(p, &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			break;
		}
		if (error) {
			gctl_error(req, "%d %s '%s'", error, ap->name, p);
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;	/* Suppress uninit. warning. */
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	error = EDOOFUS;	/* Prevent bogus uninit. warning. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
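/*
 * Taste method: see whether the provider contains a partition table
 * we recognize and, if so, instantiate a geom with one provider per
 * (non-internal) partition entry.
 */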
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/* If we're nested, get the absolute sector offset on disk. */
	if (table->gpt_depth) {
		error = g_getattr("PART::offset", cp, &attr);
		if (error)
			goto fail;
		table->gpt_offset = attr;
	}

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}

/*
 * Geom methods.
 */
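/*
 * Access method: forward the access counts to our consumer, turning
 * every write access into a write-exclusive access on the underlying
 * provider.
 */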
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}
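/*
 * Dumpconf method: emit the geom and provider configuration, either
 * as a one-line summary (indent == NULL) or as XML for the confxml
 * tree, and let the scheme append its own details.
 */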
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, (__func__));
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
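/*
 * Start method: translate I/O requests on a partition provider to the
 * underlying provider by shifting the offset, clip requests at the
 * partition boundary, and answer GETATTR queries (geometry, nesting
 * and kernel dump attributes) locally where possible.
 */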
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_int(bp, "PART::offset",
		    table->gpt_offset + entry->gpe_start))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}