1 /*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/diskmbr.h> 33 #include <sys/endian.h> 34 #include <sys/kernel.h> 35 #include <sys/kobj.h> 36 #include <sys/limits.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/mutex.h> 40 #include <sys/queue.h> 41 #include <sys/sbuf.h> 42 #include <sys/systm.h> 43 #include <sys/uuid.h> 44 #include <geom/geom.h> 45 #include <geom/geom_ctl.h> 46 #include <geom/geom_int.h> 47 #include <geom/part/g_part.h> 48 49 #include "g_part_if.h" 50 51 #ifndef _PATH_DEV 52 #define _PATH_DEV "/dev/" 53 #endif 54 55 static kobj_method_t g_part_null_methods[] = { 56 { 0, 0 } 57 }; 58 59 static struct g_part_scheme g_part_null_scheme = { 60 "(none)", 61 g_part_null_methods, 62 sizeof(struct g_part_table), 63 }; 64 65 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 66 TAILQ_HEAD_INITIALIZER(g_part_schemes); 67 68 struct g_part_alias_list { 69 const char *lexeme; 70 enum g_part_alias alias; 71 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 72 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 73 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 74 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 75 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 76 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 77 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 78 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 79 { "efi", G_PART_ALIAS_EFI }, 80 { "freebsd", G_PART_ALIAS_FREEBSD }, 81 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 82 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 83 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 84 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 85 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 86 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 87 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 88 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 89 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 90 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 91 { "ms-ldm-data", 
G_PART_ALIAS_MS_LDM_DATA }, 92 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 93 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 94 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 95 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 96 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 97 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 98 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 99 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 100 { "mbr", G_PART_ALIAS_MBR } 101 }; 102 103 /* 104 * The GEOM partitioning class. 105 */ 106 static g_ctl_req_t g_part_ctlreq; 107 static g_ctl_destroy_geom_t g_part_destroy_geom; 108 static g_fini_t g_part_fini; 109 static g_init_t g_part_init; 110 static g_taste_t g_part_taste; 111 112 static g_access_t g_part_access; 113 static g_dumpconf_t g_part_dumpconf; 114 static g_orphan_t g_part_orphan; 115 static g_spoiled_t g_part_spoiled; 116 static g_start_t g_part_start; 117 118 static struct g_class g_part_class = { 119 .name = "PART", 120 .version = G_VERSION, 121 /* Class methods. */ 122 .ctlreq = g_part_ctlreq, 123 .destroy_geom = g_part_destroy_geom, 124 .fini = g_part_fini, 125 .init = g_part_init, 126 .taste = g_part_taste, 127 /* Geom methods. */ 128 .access = g_part_access, 129 .dumpconf = g_part_dumpconf, 130 .orphan = g_part_orphan, 131 .spoiled = g_part_spoiled, 132 .start = g_part_start, 133 }; 134 135 DECLARE_GEOM_CLASS(g_part_class, g_part); 136 137 /* 138 * Support functions. 
139 */ 140 141 static void g_part_wither(struct g_geom *, int); 142 143 const char * 144 g_part_alias_name(enum g_part_alias alias) 145 { 146 int i; 147 148 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 149 if (g_part_alias_list[i].alias != alias) 150 continue; 151 return (g_part_alias_list[i].lexeme); 152 } 153 154 return (NULL); 155 } 156 157 void 158 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 159 u_int *bestheads) 160 { 161 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 162 off_t chs, cylinders; 163 u_int heads; 164 int idx; 165 166 *bestchs = 0; 167 *bestheads = 0; 168 for (idx = 0; candidate_heads[idx] != 0; idx++) { 169 heads = candidate_heads[idx]; 170 cylinders = blocks / heads / sectors; 171 if (cylinders < heads || cylinders < sectors) 172 break; 173 if (cylinders > 1023) 174 continue; 175 chs = cylinders * heads * sectors; 176 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 177 *bestchs = chs; 178 *bestheads = heads; 179 } 180 } 181 } 182 183 static void 184 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 185 off_t blocks) 186 { 187 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 188 off_t chs, bestchs; 189 u_int heads, sectors; 190 int idx; 191 192 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 193 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 194 table->gpt_fixgeom = 0; 195 table->gpt_heads = 0; 196 table->gpt_sectors = 0; 197 bestchs = 0; 198 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 199 sectors = candidate_sectors[idx]; 200 g_part_geometry_heads(blocks, sectors, &chs, &heads); 201 if (chs == 0) 202 continue; 203 /* 204 * Prefer a geometry with sectors > 1, but only if 205 * it doesn't bump down the numbver of heads to 1. 
206 */ 207 if (chs > bestchs || (chs == bestchs && heads > 1 && 208 table->gpt_sectors == 1)) { 209 bestchs = chs; 210 table->gpt_heads = heads; 211 table->gpt_sectors = sectors; 212 } 213 } 214 /* 215 * If we didn't find a geometry at all, then the disk is 216 * too big. This means we can use the maximum number of 217 * heads and sectors. 218 */ 219 if (bestchs == 0) { 220 table->gpt_heads = 255; 221 table->gpt_sectors = 63; 222 } 223 } else { 224 table->gpt_fixgeom = 1; 225 table->gpt_heads = heads; 226 table->gpt_sectors = sectors; 227 } 228 } 229 230 struct g_part_entry * 231 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 232 quad_t end) 233 { 234 struct g_part_entry *entry, *last; 235 236 last = NULL; 237 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 238 if (entry->gpe_index == index) 239 break; 240 if (entry->gpe_index > index) { 241 entry = NULL; 242 break; 243 } 244 last = entry; 245 } 246 if (entry == NULL) { 247 entry = g_malloc(table->gpt_scheme->gps_entrysz, 248 M_WAITOK | M_ZERO); 249 entry->gpe_index = index; 250 if (last == NULL) 251 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 252 else 253 LIST_INSERT_AFTER(last, entry, gpe_entry); 254 } else 255 entry->gpe_offset = 0; 256 entry->gpe_start = start; 257 entry->gpe_end = end; 258 return (entry); 259 } 260 261 static void 262 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 263 struct g_part_entry *entry) 264 { 265 struct g_consumer *cp; 266 struct g_provider *pp; 267 struct sbuf *sb; 268 off_t offset; 269 270 cp = LIST_FIRST(&gp->consumer); 271 pp = cp->provider; 272 273 offset = entry->gpe_start * pp->sectorsize; 274 if (entry->gpe_offset < offset) 275 entry->gpe_offset = offset; 276 277 if (entry->gpe_pp == NULL) { 278 sb = sbuf_new_auto(); 279 G_PART_FULLNAME(table, entry, sb, gp->name); 280 sbuf_finish(sb); 281 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 282 sbuf_delete(sb); 283 entry->gpe_pp->private = entry; /* Close the 
circle. */ 284 } 285 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 286 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 287 pp->sectorsize; 288 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 289 entry->gpe_pp->sectorsize = pp->sectorsize; 290 entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE; 291 if (pp->stripesize > 0) { 292 entry->gpe_pp->stripesize = pp->stripesize; 293 entry->gpe_pp->stripeoffset = (pp->stripeoffset + 294 entry->gpe_offset) % pp->stripesize; 295 } 296 g_error_provider(entry->gpe_pp, 0); 297 } 298 299 static int 300 g_part_parm_geom(const char *rawname, struct g_geom **v) 301 { 302 struct g_geom *gp; 303 const char *pname; 304 305 if (strncmp(rawname, _PATH_DEV, strlen(_PATH_DEV)) == 0) 306 pname = rawname + strlen(_PATH_DEV); 307 else 308 pname = rawname; 309 LIST_FOREACH(gp, &g_part_class.geom, geom) { 310 if (!strcmp(pname, gp->name)) 311 break; 312 } 313 if (gp == NULL) 314 return (EINVAL); 315 *v = gp; 316 return (0); 317 } 318 319 static int 320 g_part_parm_provider(const char *pname, struct g_provider **v) 321 { 322 struct g_provider *pp; 323 324 if (strncmp(pname, _PATH_DEV, strlen(_PATH_DEV)) == 0) 325 pp = g_provider_by_name(pname + strlen(_PATH_DEV)); 326 else 327 pp = g_provider_by_name(pname); 328 if (pp == NULL) 329 return (EINVAL); 330 *v = pp; 331 return (0); 332 } 333 334 static int 335 g_part_parm_quad(const char *p, quad_t *v) 336 { 337 char *x; 338 quad_t q; 339 340 q = strtoq(p, &x, 0); 341 if (*x != '\0' || q < 0) 342 return (EINVAL); 343 *v = q; 344 return (0); 345 } 346 347 static int 348 g_part_parm_scheme(const char *p, struct g_part_scheme **v) 349 { 350 struct g_part_scheme *s; 351 352 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 353 if (s == &g_part_null_scheme) 354 continue; 355 if (!strcasecmp(s->name, p)) 356 break; 357 } 358 if (s == NULL) 359 return (EINVAL); 360 *v = s; 361 return (0); 362 } 363 364 static int 365 g_part_parm_str(const char *p, const char 
**v) 366 { 367 368 if (p[0] == '\0') 369 return (EINVAL); 370 *v = p; 371 return (0); 372 } 373 374 static int 375 g_part_parm_uint(const char *p, u_int *v) 376 { 377 char *x; 378 long l; 379 380 l = strtol(p, &x, 0); 381 if (*x != '\0' || l < 0 || l > INT_MAX) 382 return (EINVAL); 383 *v = (unsigned int)l; 384 return (0); 385 } 386 387 static int 388 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 389 { 390 struct g_part_scheme *iter, *scheme; 391 struct g_part_table *table; 392 int pri, probe; 393 394 table = gp->softc; 395 scheme = (table != NULL) ? table->gpt_scheme : NULL; 396 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 397 if (pri == 0) 398 goto done; 399 if (pri > 0) { /* error */ 400 scheme = NULL; 401 pri = INT_MIN; 402 } 403 404 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 405 if (iter == &g_part_null_scheme) 406 continue; 407 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 408 M_WAITOK); 409 table->gpt_gp = gp; 410 table->gpt_scheme = iter; 411 table->gpt_depth = depth; 412 probe = G_PART_PROBE(table, cp); 413 if (probe <= 0 && probe > pri) { 414 pri = probe; 415 scheme = iter; 416 if (gp->softc != NULL) 417 kobj_delete((kobj_t)gp->softc, M_GEOM); 418 gp->softc = table; 419 if (pri == 0) 420 goto done; 421 } else 422 kobj_delete((kobj_t)table, M_GEOM); 423 } 424 425 done: 426 return ((scheme == NULL) ? ENXIO : 0); 427 } 428 429 /* 430 * Control request functions. 
431 */ 432 433 static int 434 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 435 { 436 struct g_geom *gp; 437 struct g_provider *pp; 438 struct g_part_entry *delent, *last, *entry; 439 struct g_part_table *table; 440 struct sbuf *sb; 441 quad_t end; 442 unsigned int index; 443 int error; 444 445 gp = gpp->gpp_geom; 446 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 447 g_topology_assert(); 448 449 pp = LIST_FIRST(&gp->consumer)->provider; 450 table = gp->softc; 451 end = gpp->gpp_start + gpp->gpp_size - 1; 452 453 if (gpp->gpp_start < table->gpt_first || 454 gpp->gpp_start > table->gpt_last) { 455 gctl_error(req, "%d start '%jd'", EINVAL, 456 (intmax_t)gpp->gpp_start); 457 return (EINVAL); 458 } 459 if (end < gpp->gpp_start || end > table->gpt_last) { 460 gctl_error(req, "%d size '%jd'", EINVAL, 461 (intmax_t)gpp->gpp_size); 462 return (EINVAL); 463 } 464 if (gpp->gpp_index > table->gpt_entries) { 465 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 466 return (EINVAL); 467 } 468 469 delent = last = NULL; 470 index = (gpp->gpp_index > 0) ? 
gpp->gpp_index : 1; 471 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 472 if (entry->gpe_deleted) { 473 if (entry->gpe_index == index) 474 delent = entry; 475 continue; 476 } 477 if (entry->gpe_index == index) 478 index = entry->gpe_index + 1; 479 if (entry->gpe_index < index) 480 last = entry; 481 if (entry->gpe_internal) 482 continue; 483 if (gpp->gpp_start >= entry->gpe_start && 484 gpp->gpp_start <= entry->gpe_end) { 485 gctl_error(req, "%d start '%jd'", ENOSPC, 486 (intmax_t)gpp->gpp_start); 487 return (ENOSPC); 488 } 489 if (end >= entry->gpe_start && end <= entry->gpe_end) { 490 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 491 return (ENOSPC); 492 } 493 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 494 gctl_error(req, "%d size '%jd'", ENOSPC, 495 (intmax_t)gpp->gpp_size); 496 return (ENOSPC); 497 } 498 } 499 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 500 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 501 return (EEXIST); 502 } 503 if (index > table->gpt_entries) { 504 gctl_error(req, "%d index '%d'", ENOSPC, index); 505 return (ENOSPC); 506 } 507 508 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 509 M_WAITOK | M_ZERO) : delent; 510 entry->gpe_index = index; 511 entry->gpe_start = gpp->gpp_start; 512 entry->gpe_end = end; 513 error = G_PART_ADD(table, entry, gpp); 514 if (error) { 515 gctl_error(req, "%d", error); 516 if (delent == NULL) 517 g_free(entry); 518 return (error); 519 } 520 if (delent == NULL) { 521 if (last == NULL) 522 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 523 else 524 LIST_INSERT_AFTER(last, entry, gpe_entry); 525 entry->gpe_created = 1; 526 } else { 527 entry->gpe_deleted = 0; 528 entry->gpe_modified = 1; 529 } 530 g_part_new_provider(gp, table, entry); 531 532 /* Provide feedback if so requested. 
*/ 533 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 534 sb = sbuf_new_auto(); 535 G_PART_FULLNAME(table, entry, sb, gp->name); 536 sbuf_cat(sb, " added\n"); 537 sbuf_finish(sb); 538 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 539 sbuf_delete(sb); 540 } 541 return (0); 542 } 543 544 static int 545 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 546 { 547 struct g_geom *gp; 548 struct g_part_table *table; 549 struct sbuf *sb; 550 int error, sz; 551 552 gp = gpp->gpp_geom; 553 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 554 g_topology_assert(); 555 556 table = gp->softc; 557 sz = table->gpt_scheme->gps_bootcodesz; 558 if (sz == 0) { 559 error = ENODEV; 560 goto fail; 561 } 562 if (gpp->gpp_codesize > sz) { 563 error = EFBIG; 564 goto fail; 565 } 566 567 error = G_PART_BOOTCODE(table, gpp); 568 if (error) 569 goto fail; 570 571 /* Provide feedback if so requested. */ 572 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 573 sb = sbuf_new_auto(); 574 sbuf_printf(sb, "%s has bootcode\n", gp->name); 575 sbuf_finish(sb); 576 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 577 sbuf_delete(sb); 578 } 579 return (0); 580 581 fail: 582 gctl_error(req, "%d", error); 583 return (error); 584 } 585 586 static int 587 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 588 { 589 struct g_consumer *cp; 590 struct g_geom *gp; 591 struct g_provider *pp; 592 struct g_part_entry *entry, *tmp; 593 struct g_part_table *table; 594 char *buf; 595 int error, i; 596 597 gp = gpp->gpp_geom; 598 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 599 g_topology_assert(); 600 601 table = gp->softc; 602 if (!table->gpt_opened) { 603 gctl_error(req, "%d", EPERM); 604 return (EPERM); 605 } 606 607 g_topology_unlock(); 608 609 cp = LIST_FIRST(&gp->consumer); 610 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 611 pp = cp->provider; 612 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 613 while 
(table->gpt_smhead != 0) { 614 i = ffs(table->gpt_smhead) - 1; 615 error = g_write_data(cp, i * pp->sectorsize, buf, 616 pp->sectorsize); 617 if (error) { 618 g_free(buf); 619 goto fail; 620 } 621 table->gpt_smhead &= ~(1 << i); 622 } 623 while (table->gpt_smtail != 0) { 624 i = ffs(table->gpt_smtail) - 1; 625 error = g_write_data(cp, pp->mediasize - (i + 1) * 626 pp->sectorsize, buf, pp->sectorsize); 627 if (error) { 628 g_free(buf); 629 goto fail; 630 } 631 table->gpt_smtail &= ~(1 << i); 632 } 633 g_free(buf); 634 } 635 636 if (table->gpt_scheme == &g_part_null_scheme) { 637 g_topology_lock(); 638 g_access(cp, -1, -1, -1); 639 g_part_wither(gp, ENXIO); 640 return (0); 641 } 642 643 error = G_PART_WRITE(table, cp); 644 if (error) 645 goto fail; 646 647 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 648 if (!entry->gpe_deleted) { 649 entry->gpe_created = 0; 650 entry->gpe_modified = 0; 651 continue; 652 } 653 LIST_REMOVE(entry, gpe_entry); 654 g_free(entry); 655 } 656 table->gpt_created = 0; 657 table->gpt_opened = 0; 658 659 g_topology_lock(); 660 g_access(cp, -1, -1, -1); 661 return (0); 662 663 fail: 664 g_topology_lock(); 665 gctl_error(req, "%d", error); 666 return (error); 667 } 668 669 static int 670 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 671 { 672 struct g_consumer *cp; 673 struct g_geom *gp; 674 struct g_provider *pp; 675 struct g_part_scheme *scheme; 676 struct g_part_table *null, *table; 677 struct sbuf *sb; 678 int attr, error; 679 680 pp = gpp->gpp_provider; 681 scheme = gpp->gpp_scheme; 682 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 683 g_topology_assert(); 684 685 /* Check that there isn't already a g_part geom on the provider. 
*/ 686 error = g_part_parm_geom(pp->name, &gp); 687 if (!error) { 688 null = gp->softc; 689 if (null->gpt_scheme != &g_part_null_scheme) { 690 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 691 return (EEXIST); 692 } 693 } else 694 null = NULL; 695 696 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 697 (gpp->gpp_entries < scheme->gps_minent || 698 gpp->gpp_entries > scheme->gps_maxent)) { 699 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 700 return (EINVAL); 701 } 702 703 if (null == NULL) 704 gp = g_new_geomf(&g_part_class, "%s", pp->name); 705 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 706 M_WAITOK); 707 table = gp->softc; 708 table->gpt_gp = gp; 709 table->gpt_scheme = gpp->gpp_scheme; 710 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 711 gpp->gpp_entries : scheme->gps_minent; 712 LIST_INIT(&table->gpt_entry); 713 if (null == NULL) { 714 cp = g_new_consumer(gp); 715 error = g_attach(cp, pp); 716 if (error == 0) 717 error = g_access(cp, 1, 1, 1); 718 if (error != 0) { 719 g_part_wither(gp, error); 720 gctl_error(req, "%d geom '%s'", error, pp->name); 721 return (error); 722 } 723 table->gpt_opened = 1; 724 } else { 725 cp = LIST_FIRST(&gp->consumer); 726 table->gpt_opened = null->gpt_opened; 727 table->gpt_smhead = null->gpt_smhead; 728 table->gpt_smtail = null->gpt_smtail; 729 } 730 731 g_topology_unlock(); 732 733 /* Make sure the provider has media. */ 734 if (pp->mediasize == 0 || pp->sectorsize == 0) { 735 error = ENODEV; 736 goto fail; 737 } 738 739 /* Make sure we can nest and if so, determine our depth. */ 740 error = g_getattr("PART::isleaf", cp, &attr); 741 if (!error && attr) { 742 error = ENODEV; 743 goto fail; 744 } 745 error = g_getattr("PART::depth", cp, &attr); 746 table->gpt_depth = (!error) ? attr + 1 : 0; 747 748 /* 749 * Synthesize a disk geometry. 
Some partitioning schemes 750 * depend on it and since some file systems need it even 751 * when the partitition scheme doesn't, we do it here in 752 * scheme-independent code. 753 */ 754 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 755 756 error = G_PART_CREATE(table, gpp); 757 if (error) 758 goto fail; 759 760 g_topology_lock(); 761 762 table->gpt_created = 1; 763 if (null != NULL) 764 kobj_delete((kobj_t)null, M_GEOM); 765 766 /* 767 * Support automatic commit by filling in the gpp_geom 768 * parameter. 769 */ 770 gpp->gpp_parms |= G_PART_PARM_GEOM; 771 gpp->gpp_geom = gp; 772 773 /* Provide feedback if so requested. */ 774 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 775 sb = sbuf_new_auto(); 776 sbuf_printf(sb, "%s created\n", gp->name); 777 sbuf_finish(sb); 778 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 779 sbuf_delete(sb); 780 } 781 return (0); 782 783 fail: 784 g_topology_lock(); 785 if (null == NULL) { 786 g_access(cp, -1, -1, -1); 787 g_part_wither(gp, error); 788 } else { 789 kobj_delete((kobj_t)gp->softc, M_GEOM); 790 gp->softc = null; 791 } 792 gctl_error(req, "%d provider", error); 793 return (error); 794 } 795 796 static int 797 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 798 { 799 struct g_geom *gp; 800 struct g_provider *pp; 801 struct g_part_entry *entry; 802 struct g_part_table *table; 803 struct sbuf *sb; 804 805 gp = gpp->gpp_geom; 806 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 807 g_topology_assert(); 808 809 table = gp->softc; 810 811 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 812 if (entry->gpe_deleted || entry->gpe_internal) 813 continue; 814 if (entry->gpe_index == gpp->gpp_index) 815 break; 816 } 817 if (entry == NULL) { 818 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 819 return (ENOENT); 820 } 821 822 pp = entry->gpe_pp; 823 if (pp != NULL) { 824 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 825 gctl_error(req, "%d", EBUSY); 826 return 
(EBUSY); 827 } 828 829 pp->private = NULL; 830 entry->gpe_pp = NULL; 831 } 832 833 if (entry->gpe_created) { 834 LIST_REMOVE(entry, gpe_entry); 835 g_free(entry); 836 } else { 837 entry->gpe_modified = 0; 838 entry->gpe_deleted = 1; 839 } 840 841 if (pp != NULL) 842 g_wither_provider(pp, ENXIO); 843 844 /* Provide feedback if so requested. */ 845 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 846 sb = sbuf_new_auto(); 847 G_PART_FULLNAME(table, entry, sb, gp->name); 848 sbuf_cat(sb, " deleted\n"); 849 sbuf_finish(sb); 850 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 851 sbuf_delete(sb); 852 } 853 return (0); 854 } 855 856 static int 857 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 858 { 859 struct g_geom *gp; 860 struct g_part_entry *entry; 861 struct g_part_table *null, *table; 862 struct sbuf *sb; 863 int error; 864 865 gp = gpp->gpp_geom; 866 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 867 g_topology_assert(); 868 869 table = gp->softc; 870 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 871 if (entry->gpe_deleted || entry->gpe_internal) 872 continue; 873 gctl_error(req, "%d", EBUSY); 874 return (EBUSY); 875 } 876 877 error = G_PART_DESTROY(table, gpp); 878 if (error) { 879 gctl_error(req, "%d", error); 880 return (error); 881 } 882 883 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 884 M_WAITOK); 885 null = gp->softc; 886 null->gpt_gp = gp; 887 null->gpt_scheme = &g_part_null_scheme; 888 LIST_INIT(&null->gpt_entry); 889 null->gpt_depth = table->gpt_depth; 890 null->gpt_opened = table->gpt_opened; 891 null->gpt_smhead = table->gpt_smhead; 892 null->gpt_smtail = table->gpt_smtail; 893 894 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 895 LIST_REMOVE(entry, gpe_entry); 896 g_free(entry); 897 } 898 kobj_delete((kobj_t)table, M_GEOM); 899 900 /* Provide feedback if so requested. 
*/ 901 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 902 sb = sbuf_new_auto(); 903 sbuf_printf(sb, "%s destroyed\n", gp->name); 904 sbuf_finish(sb); 905 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 906 sbuf_delete(sb); 907 } 908 return (0); 909 } 910 911 static int 912 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 913 { 914 struct g_geom *gp; 915 struct g_part_entry *entry; 916 struct g_part_table *table; 917 struct sbuf *sb; 918 int error; 919 920 gp = gpp->gpp_geom; 921 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 922 g_topology_assert(); 923 924 table = gp->softc; 925 926 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 927 if (entry->gpe_deleted || entry->gpe_internal) 928 continue; 929 if (entry->gpe_index == gpp->gpp_index) 930 break; 931 } 932 if (entry == NULL) { 933 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 934 return (ENOENT); 935 } 936 937 error = G_PART_MODIFY(table, entry, gpp); 938 if (error) { 939 gctl_error(req, "%d", error); 940 return (error); 941 } 942 943 if (!entry->gpe_created) 944 entry->gpe_modified = 1; 945 946 /* Provide feedback if so requested. 
*/ 947 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 948 sb = sbuf_new_auto(); 949 G_PART_FULLNAME(table, entry, sb, gp->name); 950 sbuf_cat(sb, " modified\n"); 951 sbuf_finish(sb); 952 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 953 sbuf_delete(sb); 954 } 955 return (0); 956 } 957 958 static int 959 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 960 { 961 gctl_error(req, "%d verb 'move'", ENOSYS); 962 return (ENOSYS); 963 } 964 965 static int 966 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 967 { 968 gctl_error(req, "%d verb 'recover'", ENOSYS); 969 return (ENOSYS); 970 } 971 972 static int 973 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 974 { 975 gctl_error(req, "%d verb 'resize'", ENOSYS); 976 return (ENOSYS); 977 } 978 979 static int 980 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 981 unsigned int set) 982 { 983 struct g_geom *gp; 984 struct g_part_entry *entry; 985 struct g_part_table *table; 986 struct sbuf *sb; 987 int error; 988 989 gp = gpp->gpp_geom; 990 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 991 g_topology_assert(); 992 993 table = gp->softc; 994 995 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 996 if (entry->gpe_deleted || entry->gpe_internal) 997 continue; 998 if (entry->gpe_index == gpp->gpp_index) 999 break; 1000 } 1001 if (entry == NULL) { 1002 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1003 return (ENOENT); 1004 } 1005 1006 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1007 if (error) { 1008 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1009 return (error); 1010 } 1011 1012 /* Provide feedback if so requested. */ 1013 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1014 sb = sbuf_new_auto(); 1015 G_PART_FULLNAME(table, entry, sb, gp->name); 1016 sbuf_printf(sb, " has %s %sset\n", gpp->gpp_attrib, 1017 (set) ? 
"" : "un"); 1018 sbuf_finish(sb); 1019 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1020 sbuf_delete(sb); 1021 } 1022 return (0); 1023 } 1024 1025 static int 1026 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1027 { 1028 struct g_consumer *cp; 1029 struct g_provider *pp; 1030 struct g_geom *gp; 1031 struct g_part_entry *entry, *tmp; 1032 struct g_part_table *table; 1033 int error, reprobe; 1034 1035 gp = gpp->gpp_geom; 1036 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1037 g_topology_assert(); 1038 1039 table = gp->softc; 1040 if (!table->gpt_opened) { 1041 gctl_error(req, "%d", EPERM); 1042 return (EPERM); 1043 } 1044 1045 cp = LIST_FIRST(&gp->consumer); 1046 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1047 entry->gpe_modified = 0; 1048 if (entry->gpe_created) { 1049 pp = entry->gpe_pp; 1050 if (pp != NULL) { 1051 pp->private = NULL; 1052 entry->gpe_pp = NULL; 1053 g_wither_provider(pp, ENXIO); 1054 } 1055 entry->gpe_deleted = 1; 1056 } 1057 if (entry->gpe_deleted) { 1058 LIST_REMOVE(entry, gpe_entry); 1059 g_free(entry); 1060 } 1061 } 1062 1063 g_topology_unlock(); 1064 1065 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1066 table->gpt_created) ? 
1 : 0; 1067 1068 if (reprobe) { 1069 if (!LIST_EMPTY(&table->gpt_entry)) { 1070 error = EBUSY; 1071 goto fail; 1072 } 1073 error = g_part_probe(gp, cp, table->gpt_depth); 1074 if (error) { 1075 g_topology_lock(); 1076 g_access(cp, -1, -1, -1); 1077 g_part_wither(gp, error); 1078 return (0); 1079 } 1080 table = gp->softc; 1081 } 1082 1083 error = G_PART_READ(table, cp); 1084 if (error) 1085 goto fail; 1086 1087 g_topology_lock(); 1088 1089 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1090 if (!entry->gpe_internal) 1091 g_part_new_provider(gp, table, entry); 1092 } 1093 1094 table->gpt_opened = 0; 1095 g_access(cp, -1, -1, -1); 1096 return (0); 1097 1098 fail: 1099 g_topology_lock(); 1100 gctl_error(req, "%d", error); 1101 return (error); 1102 } 1103 1104 static void 1105 g_part_wither(struct g_geom *gp, int error) 1106 { 1107 struct g_part_entry *entry; 1108 struct g_part_table *table; 1109 1110 table = gp->softc; 1111 if (table != NULL) { 1112 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1113 LIST_REMOVE(entry, gpe_entry); 1114 g_free(entry); 1115 } 1116 if (gp->softc != NULL) { 1117 kobj_delete((kobj_t)gp->softc, M_GEOM); 1118 gp->softc = NULL; 1119 } 1120 } 1121 g_wither_geom(gp, error); 1122 } 1123 1124 /* 1125 * Class methods. 
 */

/*
 * Handle a gctl(8) control request for the PART class.  The verb string
 * selects the operation; the request arguments are parsed into a
 * struct g_part_parms, mandatory/optional parameters are validated,
 * write access to the consumer is obtained when the operation modifies
 * on-disk state, the scheme gets a chance to pre-check the parameters,
 * and finally the request is dispatched to the matching g_part_ctl_*()
 * handler.  Errors are reported through gctl_error().
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	const char *p;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, len, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	/*
	 * Decode the verb.  mparms collects the mandatory parameters for
	 * the operation, oparms the optional ones; "flags", "output" and
	 * "version" are accepted by every verb.  modifies is cleared for
	 * verbs that do not change on-disk state (commit, undo).
	 */
	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	/*
	 * Walk the request arguments, map each name to a parameter bit,
	 * reject parameters the verb does not accept and convert the
	 * value into the corresponding gpp field.
	 */
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			/* "class" is consumed by the gctl framework. */
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			break;
		case 'g':
			if (!strcmp(ap->name, "geom"))
				parm = G_PART_PARM_GEOM;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 'p':
			if (!strcmp(ap->name, "provider"))
				parm = G_PART_PARM_PROVIDER;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			/* "verb" was handled above; skip it here. */
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		/* Bootcode is a binary blob; everything else is ASCII. */
		if (parm == G_PART_PARM_BOOTCODE)
			p = gctl_get_param(req, ap->name, &len);
		else
			p = gctl_get_asciiparam(req, ap->name);
		if (p == NULL) {
			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(p, &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			gpp.gpp_codeptr = p;
			gpp.gpp_codesize = len;
			error = 0;
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_uint(p, &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			/* Empty flags are ignored entirely. */
			if (p[0] == '\0')
				continue;
			error = g_part_parm_str(p, &gpp.gpp_flags);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(p, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_uint(p, &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			/* An empty label is always valid. */
			gpp.gpp_label = p;
			error = 0;
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(p, &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(p, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(p, &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(p, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint(p, &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			break;
		}
		if (error) {
			gctl_error(req, "%d %s '%s'", error, ap->name, p);
			return;
		}
		gpp.gpp_parms |= parm;
	}
	/* All mandatory parameters must have been supplied. */
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			/* Remember to drop access again on error below. */
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	/* Dispatch to the per-operation handler. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit: a 'C' in the flags string. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ?
		    1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

out:
	/* Drop the access we gained above if the operation failed. */
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

/*
 * Class method: destroy the given geom.  Simply withers it with EINVAL;
 * always reports success to the caller.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}

/*
 * Class method: taste the given provider.  Attaches a new geom with a
 * consumer, probes for a supported partitioning scheme, reads the table
 * and creates a provider per partition entry.  Returns the new geom, or
 * NULL when the provider is unsuitable or no scheme matched.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Hold root mounting until probing/reading is done. */
	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth.
	 */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		/* The underlying provider refuses nested partitioning. */
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Publish a provider for every non-internal partition entry. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}

/*
 * Geom methods.
 */

/*
 * Access method for partition providers: forward the request to the
 * single consumer of the parent geom.
 */
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access.
	 */
	return (g_access(cp, dr, dw, dw + de));
}

/*
 * Dumpconf method: emits confxml for the geom, its providers or its
 * consumers, plus a terse libdisk-compatible one-liner when indent is
 * NULL.  Scheme-specific details are appended via G_PART_DUMPCONF().
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		/* Terse provider dump (no indentation requested). */
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration.
		 */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

/*
 * Orphan method: the underlying provider went away; wither the whole
 * partitioning geom with the provider's error code.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, (__func__));
	g_part_wither(cp->geom, pp->error);
}

/*
 * Spoiled method: another consumer gained write access to the parent
 * provider, so our cached table may be stale; wither with ENXIO.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}

/*
 * Start method: I/O entry point for partition providers.  Translates
 * partition-relative offsets into parent-provider offsets and forwards
 * cloned bios to the consumer; handles GETATTR and kernel-dump setup
 * in place.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		/* Provider has no partition entry (being withered?). */
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp,
			    ENOMEM);
			return;
		}
		/* Clip the request to the partition and rebase the offset. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Pass flushes straight through to the parent below. */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			/* Clip and rebase the dump region like normal I/O. */
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* FLUSH and unhandled GETATTRs are forwarded to the parent. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}

/*
 * Class init: register the built-in null scheme as the list head
 * sentinel.
 */
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

/*
 * Class fini: remove the built-in null scheme again.
 */
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	/*
	 * GEOM event handler run on scheme module unload.  arg points at a
	 * uintptr_t holding the scheme pointer on entry; on exit it holds
	 * the error code (0 = scheme removed, EBUSY = still in use).
	 */
	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	/* Wither every idle geom using this scheme; busy ones block it. */
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		/* NOTE(review): gp->softc is dereferenced unchecked here —
		 * presumably every PART geom always has a table; confirm. */
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	/* Report the result back through the exchange slot. */
	*xchg = error;
}

/*
 * Module event handler for partitioning scheme modules.  On load the
 * scheme is registered and existing providers are retasted; on unload
 * the removal is done in a GEOM event (g_part_unload_event) so it runs
 * with the topology lock held.  Returns 0 or an errno value.
 */
int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	uintptr_t arg;
	int error;

	switch (type) {
	case MOD_LOAD:
		TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);

		error = g_retaste(&g_part_class);
		if (error)
			TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		/*
		 * The event handler overwrites arg with the error code; an
		 * unchanged value would mean it never ran (EDOOFUS).
		 */
		if (!error)
			error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}