1 /*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/diskmbr.h> 33 #include <sys/endian.h> 34 #include <sys/kernel.h> 35 #include <sys/kobj.h> 36 #include <sys/limits.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/mutex.h> 40 #include <sys/queue.h> 41 #include <sys/sbuf.h> 42 #include <sys/sysctl.h> 43 #include <sys/systm.h> 44 #include <sys/uuid.h> 45 #include <geom/geom.h> 46 #include <geom/geom_ctl.h> 47 #include <geom/geom_int.h> 48 #include <geom/part/g_part.h> 49 50 #include "g_part_if.h" 51 52 #ifndef _PATH_DEV 53 #define _PATH_DEV "/dev/" 54 #endif 55 56 static kobj_method_t g_part_null_methods[] = { 57 { 0, 0 } 58 }; 59 60 static struct g_part_scheme g_part_null_scheme = { 61 "(none)", 62 g_part_null_methods, 63 sizeof(struct g_part_table), 64 }; 65 66 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 67 TAILQ_HEAD_INITIALIZER(g_part_schemes); 68 69 struct g_part_alias_list { 70 const char *lexeme; 71 enum g_part_alias alias; 72 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 73 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 74 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 75 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 76 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 77 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 78 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 79 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 81 { "ebr", G_PART_ALIAS_EBR }, 82 { "efi", G_PART_ALIAS_EFI }, 83 { "fat32", G_PART_ALIAS_MS_FAT32 }, 84 { "freebsd", G_PART_ALIAS_FREEBSD }, 85 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 86 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS }, 87 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 88 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 89 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 90 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 91 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 92 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 93 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 94 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 95 { "mbr", G_PART_ALIAS_MBR }, 96 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 97 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 98 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 99 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 100 { "ntfs", G_PART_ALIAS_MS_NTFS }, 101 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 102 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 103 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 104 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 105 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 106 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 107 { "vmware-vmfs", G_PART_ALIAS_VMFS }, 108 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG }, 109 { "vmware-reserved", G_PART_ALIAS_VMRESERVED }, 110 }; 111 112 SYSCTL_DECL(_kern_geom); 113 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, 114 "GEOM_PART stuff"); 115 static u_int check_integrity = 1; 116 TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity); 117 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 118 CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1, 119 "Enable integrity checking"); 120 121 /* 122 * The GEOM partitioning class. 123 */ 124 static g_ctl_req_t g_part_ctlreq; 125 static g_ctl_destroy_geom_t g_part_destroy_geom; 126 static g_fini_t g_part_fini; 127 static g_init_t g_part_init; 128 static g_taste_t g_part_taste; 129 130 static g_access_t g_part_access; 131 static g_dumpconf_t g_part_dumpconf; 132 static g_orphan_t g_part_orphan; 133 static g_spoiled_t g_part_spoiled; 134 static g_start_t g_part_start; 135 136 static struct g_class g_part_class = { 137 .name = "PART", 138 .version = G_VERSION, 139 /* Class methods. */ 140 .ctlreq = g_part_ctlreq, 141 .destroy_geom = g_part_destroy_geom, 142 .fini = g_part_fini, 143 .init = g_part_init, 144 .taste = g_part_taste, 145 /* Geom methods. */ 146 .access = g_part_access, 147 .dumpconf = g_part_dumpconf, 148 .orphan = g_part_orphan, 149 .spoiled = g_part_spoiled, 150 .start = g_part_start, 151 }; 152 153 DECLARE_GEOM_CLASS(g_part_class, g_part); 154 MODULE_VERSION(g_part, 0); 155 156 /* 157 * Support functions. 158 */ 159 160 static void g_part_wither(struct g_geom *, int); 161 162 const char * 163 g_part_alias_name(enum g_part_alias alias) 164 { 165 int i; 166 167 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 168 if (g_part_alias_list[i].alias != alias) 169 continue; 170 return (g_part_alias_list[i].lexeme); 171 } 172 173 return (NULL); 174 } 175 176 void 177 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 178 u_int *bestheads) 179 { 180 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 181 off_t chs, cylinders; 182 u_int heads; 183 int idx; 184 185 *bestchs = 0; 186 *bestheads = 0; 187 for (idx = 0; candidate_heads[idx] != 0; idx++) { 188 heads = candidate_heads[idx]; 189 cylinders = blocks / heads / sectors; 190 if (cylinders < heads || cylinders < sectors) 191 break; 192 if (cylinders > 1023) 193 continue; 194 chs = cylinders * heads * sectors; 195 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 196 *bestchs = chs; 197 *bestheads = heads; 198 } 199 } 200 } 201 202 static void 203 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 204 off_t blocks) 205 { 206 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 207 off_t chs, bestchs; 208 u_int heads, sectors; 209 int idx; 210 211 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 212 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 213 table->gpt_fixgeom = 0; 214 table->gpt_heads = 0; 215 table->gpt_sectors = 0; 216 bestchs = 0; 217 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 218 sectors = candidate_sectors[idx]; 219 g_part_geometry_heads(blocks, sectors, &chs, &heads); 220 if (chs == 0) 221 continue; 222 /* 223 * Prefer a geometry with sectors > 1, but only if 224 * it doesn't bump down the number of heads to 1. 225 */ 226 if (chs > bestchs || (chs == bestchs && heads > 1 && 227 table->gpt_sectors == 1)) { 228 bestchs = chs; 229 table->gpt_heads = heads; 230 table->gpt_sectors = sectors; 231 } 232 } 233 /* 234 * If we didn't find a geometry at all, then the disk is 235 * too big. This means we can use the maximum number of 236 * heads and sectors. 237 */ 238 if (bestchs == 0) { 239 table->gpt_heads = 255; 240 table->gpt_sectors = 63; 241 } 242 } else { 243 table->gpt_fixgeom = 1; 244 table->gpt_heads = heads; 245 table->gpt_sectors = sectors; 246 } 247 } 248 249 #define DPRINTF(...) if (bootverbose) { \ 250 printf("GEOM_PART: " __VA_ARGS__); \ 251 } 252 253 static int 254 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 255 { 256 struct g_part_entry *e1, *e2; 257 struct g_provider *pp; 258 off_t offset; 259 int failed; 260 261 failed = 0; 262 pp = cp->provider; 263 if (table->gpt_last < table->gpt_first) { 264 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 265 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 266 failed++; 267 } 268 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 269 DPRINTF("last LBA extends beyond mediasize: " 270 "%jd > %jd\n", (intmax_t)table->gpt_last, 271 (intmax_t)pp->mediasize / pp->sectorsize - 1); 272 failed++; 273 } 274 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 275 if (e1->gpe_deleted || e1->gpe_internal) 276 continue; 277 if (e1->gpe_start < table->gpt_first) { 278 DPRINTF("partition %d has start offset below first " 279 "LBA: %jd < %jd\n", e1->gpe_index, 280 (intmax_t)e1->gpe_start, 281 (intmax_t)table->gpt_first); 282 failed++; 283 } 284 if (e1->gpe_start > table->gpt_last) { 285 DPRINTF("partition %d has start offset beyond last " 286 "LBA: %jd > %jd\n", e1->gpe_index, 287 (intmax_t)e1->gpe_start, 288 (intmax_t)table->gpt_last); 289 failed++; 290 } 291 if (e1->gpe_end < e1->gpe_start) { 292 DPRINTF("partition %d has end offset below start " 293 "offset: %jd < %jd\n", e1->gpe_index, 294 (intmax_t)e1->gpe_end, 295 (intmax_t)e1->gpe_start); 296 failed++; 297 } 298 if (e1->gpe_end > table->gpt_last) { 299 DPRINTF("partition %d has end offset beyond last " 300 "LBA: %jd > %jd\n", e1->gpe_index, 301 (intmax_t)e1->gpe_end, 302 (intmax_t)table->gpt_last); 303 failed++; 304 } 305 if (pp->stripesize > 0) { 306 offset = e1->gpe_start * pp->sectorsize; 307 if (e1->gpe_offset > offset) 308 offset = e1->gpe_offset; 309 if ((offset + pp->stripeoffset) % pp->stripesize) { 310 DPRINTF("partition %d is not aligned on %u " 311 "bytes\n", e1->gpe_index, pp->stripesize); 312 /* Don't treat this as a critical failure */ 313 } 314 } 315 e2 = e1; 316 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 317 if (e2->gpe_deleted || e2->gpe_internal) 318 continue; 319 if (e1->gpe_start >= e2->gpe_start && 320 e1->gpe_start <= e2->gpe_end) { 321 DPRINTF("partition %d has start offset inside " 322 "partition %d: start[%d] %jd >= start[%d] " 323 "%jd <= end[%d] %jd\n", 324 e1->gpe_index, e2->gpe_index, 325 e2->gpe_index, (intmax_t)e2->gpe_start, 326 e1->gpe_index, (intmax_t)e1->gpe_start, 327 e2->gpe_index, (intmax_t)e2->gpe_end); 328 failed++; 329 } 330 if (e1->gpe_end >= e2->gpe_start && 331 e1->gpe_end <= e2->gpe_end) { 332 DPRINTF("partition %d has end offset inside " 333 "partition %d: start[%d] %jd >= end[%d] " 334 "%jd <= end[%d] %jd\n", 335 e1->gpe_index, e2->gpe_index, 336 e2->gpe_index, (intmax_t)e2->gpe_start, 337 e1->gpe_index, (intmax_t)e1->gpe_end, 338 e2->gpe_index, (intmax_t)e2->gpe_end); 339 failed++; 340 } 341 if (e1->gpe_start < e2->gpe_start && 342 e1->gpe_end > e2->gpe_end) { 343 DPRINTF("partition %d contains partition %d: " 344 "start[%d] %jd > start[%d] %jd, end[%d] " 345 "%jd < end[%d] %jd\n", 346 e1->gpe_index, e2->gpe_index, 347 e1->gpe_index, (intmax_t)e1->gpe_start, 348 e2->gpe_index, (intmax_t)e2->gpe_start, 349 e2->gpe_index, (intmax_t)e2->gpe_end, 350 e1->gpe_index, (intmax_t)e1->gpe_end); 351 failed++; 352 } 353 } 354 } 355 if (failed != 0) { 356 printf("GEOM_PART: integrity check failed (%s, %s)\n", 357 pp->name, table->gpt_scheme->name); 358 if (check_integrity != 0) 359 return (EINVAL); 360 table->gpt_corrupt = 1; 361 } 362 return (0); 363 } 364 #undef DPRINTF 365 366 struct g_part_entry * 367 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 368 quad_t end) 369 { 370 struct g_part_entry *entry, *last; 371 372 last = NULL; 373 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 374 if (entry->gpe_index == index) 375 break; 376 if (entry->gpe_index > index) { 377 entry = NULL; 378 break; 379 } 380 last = entry; 381 } 382 if (entry == NULL) { 383 entry = g_malloc(table->gpt_scheme->gps_entrysz, 384 M_WAITOK | M_ZERO); 385 entry->gpe_index = index; 386 if (last == NULL) 387 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 388 else 389 LIST_INSERT_AFTER(last, entry, gpe_entry); 390 } else 391 entry->gpe_offset = 0; 392 entry->gpe_start = start; 393 entry->gpe_end = end; 394 return (entry); 395 } 396 397 static void 398 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 399 struct g_part_entry *entry) 400 { 401 struct g_consumer *cp; 402 struct g_provider *pp; 403 struct sbuf *sb; 404 off_t offset; 405 406 cp = LIST_FIRST(&gp->consumer); 407 pp = cp->provider; 408 409 offset = entry->gpe_start * pp->sectorsize; 410 if (entry->gpe_offset < offset) 411 entry->gpe_offset = offset; 412 413 if (entry->gpe_pp == NULL) { 414 sb = sbuf_new_auto(); 415 G_PART_FULLNAME(table, entry, sb, gp->name); 416 sbuf_finish(sb); 417 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 418 sbuf_delete(sb); 419 entry->gpe_pp->private = entry; /* Close the circle. */ 420 } 421 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 422 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 423 pp->sectorsize; 424 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 425 entry->gpe_pp->sectorsize = pp->sectorsize; 426 entry->gpe_pp->stripesize = pp->stripesize; 427 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 428 if (pp->stripesize > 0) 429 entry->gpe_pp->stripeoffset %= pp->stripesize; 430 g_error_provider(entry->gpe_pp, 0); 431 } 432 433 static struct g_geom* 434 g_part_find_geom(const char *name) 435 { 436 struct g_geom *gp; 437 LIST_FOREACH(gp, &g_part_class.geom, geom) { 438 if (!strcmp(name, gp->name)) 439 break; 440 } 441 return (gp); 442 } 443 444 static int 445 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 446 { 447 struct g_geom *gp; 448 const char *gname; 449 450 gname = gctl_get_asciiparam(req, name); 451 if (gname == NULL) 452 return (ENOATTR); 453 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 454 gname += sizeof(_PATH_DEV) - 1; 455 gp = g_part_find_geom(gname); 456 if (gp == NULL) { 457 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 458 return (EINVAL); 459 } 460 if ((gp->flags & G_GEOM_WITHER) != 0) { 461 gctl_error(req, "%d %s", ENXIO, gname); 462 return (ENXIO); 463 } 464 *v = gp; 465 return (0); 466 } 467 468 static int 469 g_part_parm_provider(struct gctl_req *req, const char *name, 470 struct g_provider **v) 471 { 472 struct g_provider *pp; 473 const char *pname; 474 475 pname = gctl_get_asciiparam(req, name); 476 if (pname == NULL) 477 return (ENOATTR); 478 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 479 pname += sizeof(_PATH_DEV) - 1; 480 pp = g_provider_by_name(pname); 481 if (pp == NULL) { 482 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 483 return (EINVAL); 484 } 485 *v = pp; 486 return (0); 487 } 488 489 static int 490 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 491 { 492 const char *p; 493 char *x; 494 quad_t q; 495 496 p = gctl_get_asciiparam(req, name); 497 if (p == NULL) 498 return (ENOATTR); 499 q = strtoq(p, &x, 0); 500 if (*x != '\0' || q < 0) { 501 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 502 return (EINVAL); 503 } 504 *v = q; 505 return (0); 506 } 507 508 static int 509 g_part_parm_scheme(struct gctl_req *req, const char *name, 510 struct g_part_scheme **v) 511 { 512 struct g_part_scheme *s; 513 const char *p; 514 515 p = gctl_get_asciiparam(req, name); 516 if (p == NULL) 517 return (ENOATTR); 518 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 519 if (s == &g_part_null_scheme) 520 continue; 521 if (!strcasecmp(s->name, p)) 522 break; 523 } 524 if (s == NULL) { 525 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 526 return (EINVAL); 527 } 528 *v = s; 529 return (0); 530 } 531 532 static int 533 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 534 { 535 const char *p; 536 537 p = gctl_get_asciiparam(req, name); 538 if (p == NULL) 539 return (ENOATTR); 540 /* An empty label is always valid. */ 541 if (strcmp(name, "label") != 0 && p[0] == '\0') { 542 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 543 return (EINVAL); 544 } 545 *v = p; 546 return (0); 547 } 548 549 static int 550 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 551 { 552 const intmax_t *p; 553 int size; 554 555 p = gctl_get_param(req, name, &size); 556 if (p == NULL) 557 return (ENOATTR); 558 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 559 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 560 return (EINVAL); 561 } 562 *v = (u_int)*p; 563 return (0); 564 } 565 566 static int 567 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 568 { 569 const uint32_t *p; 570 int size; 571 572 p = gctl_get_param(req, name, &size); 573 if (p == NULL) 574 return (ENOATTR); 575 if (size != sizeof(*p) || *p > INT_MAX) { 576 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 577 return (EINVAL); 578 } 579 *v = (u_int)*p; 580 return (0); 581 } 582 583 static int 584 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 585 unsigned int *s) 586 { 587 const void *p; 588 int size; 589 590 p = gctl_get_param(req, name, &size); 591 if (p == NULL) 592 return (ENOATTR); 593 *v = p; 594 *s = size; 595 return (0); 596 } 597 598 static int 599 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 600 { 601 struct g_part_scheme *iter, *scheme; 602 struct g_part_table *table; 603 int pri, probe; 604 605 table = gp->softc; 606 scheme = (table != NULL) ? table->gpt_scheme : NULL; 607 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 608 if (pri == 0) 609 goto done; 610 if (pri > 0) { /* error */ 611 scheme = NULL; 612 pri = INT_MIN; 613 } 614 615 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 616 if (iter == &g_part_null_scheme) 617 continue; 618 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 619 M_WAITOK); 620 table->gpt_gp = gp; 621 table->gpt_scheme = iter; 622 table->gpt_depth = depth; 623 probe = G_PART_PROBE(table, cp); 624 if (probe <= 0 && probe > pri) { 625 pri = probe; 626 scheme = iter; 627 if (gp->softc != NULL) 628 kobj_delete((kobj_t)gp->softc, M_GEOM); 629 gp->softc = table; 630 if (pri == 0) 631 goto done; 632 } else 633 kobj_delete((kobj_t)table, M_GEOM); 634 } 635 636 done: 637 return ((scheme == NULL) ? ENXIO : 0); 638 } 639 640 /* 641 * Control request functions. 642 */ 643 644 static int 645 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 646 { 647 struct g_geom *gp; 648 struct g_provider *pp; 649 struct g_part_entry *delent, *last, *entry; 650 struct g_part_table *table; 651 struct sbuf *sb; 652 quad_t end; 653 unsigned int index; 654 int error; 655 656 gp = gpp->gpp_geom; 657 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 658 g_topology_assert(); 659 660 pp = LIST_FIRST(&gp->consumer)->provider; 661 table = gp->softc; 662 end = gpp->gpp_start + gpp->gpp_size - 1; 663 664 if (gpp->gpp_start < table->gpt_first || 665 gpp->gpp_start > table->gpt_last) { 666 gctl_error(req, "%d start '%jd'", EINVAL, 667 (intmax_t)gpp->gpp_start); 668 return (EINVAL); 669 } 670 if (end < gpp->gpp_start || end > table->gpt_last) { 671 gctl_error(req, "%d size '%jd'", EINVAL, 672 (intmax_t)gpp->gpp_size); 673 return (EINVAL); 674 } 675 if (gpp->gpp_index > table->gpt_entries) { 676 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 677 return (EINVAL); 678 } 679 680 delent = last = NULL; 681 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 682 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 683 if (entry->gpe_deleted) { 684 if (entry->gpe_index == index) 685 delent = entry; 686 continue; 687 } 688 if (entry->gpe_index == index) 689 index = entry->gpe_index + 1; 690 if (entry->gpe_index < index) 691 last = entry; 692 if (entry->gpe_internal) 693 continue; 694 if (gpp->gpp_start >= entry->gpe_start && 695 gpp->gpp_start <= entry->gpe_end) { 696 gctl_error(req, "%d start '%jd'", ENOSPC, 697 (intmax_t)gpp->gpp_start); 698 return (ENOSPC); 699 } 700 if (end >= entry->gpe_start && end <= entry->gpe_end) { 701 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 702 return (ENOSPC); 703 } 704 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 705 gctl_error(req, "%d size '%jd'", ENOSPC, 706 (intmax_t)gpp->gpp_size); 707 return (ENOSPC); 708 } 709 } 710 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 711 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 712 return (EEXIST); 713 } 714 if (index > table->gpt_entries) { 715 gctl_error(req, "%d index '%d'", ENOSPC, index); 716 return (ENOSPC); 717 } 718 719 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 720 M_WAITOK | M_ZERO) : delent; 721 entry->gpe_index = index; 722 entry->gpe_start = gpp->gpp_start; 723 entry->gpe_end = end; 724 error = G_PART_ADD(table, entry, gpp); 725 if (error) { 726 gctl_error(req, "%d", error); 727 if (delent == NULL) 728 g_free(entry); 729 return (error); 730 } 731 if (delent == NULL) { 732 if (last == NULL) 733 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 734 else 735 LIST_INSERT_AFTER(last, entry, gpe_entry); 736 entry->gpe_created = 1; 737 } else { 738 entry->gpe_deleted = 0; 739 entry->gpe_modified = 1; 740 } 741 g_part_new_provider(gp, table, entry); 742 743 /* Provide feedback if so requested. */ 744 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 745 sb = sbuf_new_auto(); 746 G_PART_FULLNAME(table, entry, sb, gp->name); 747 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 748 sbuf_printf(sb, " added, but partition is not " 749 "aligned on %u bytes\n", pp->stripesize); 750 else 751 sbuf_cat(sb, " added\n"); 752 sbuf_finish(sb); 753 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 754 sbuf_delete(sb); 755 } 756 return (0); 757 } 758 759 static int 760 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 761 { 762 struct g_geom *gp; 763 struct g_part_table *table; 764 struct sbuf *sb; 765 int error, sz; 766 767 gp = gpp->gpp_geom; 768 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 769 g_topology_assert(); 770 771 table = gp->softc; 772 sz = table->gpt_scheme->gps_bootcodesz; 773 if (sz == 0) { 774 error = ENODEV; 775 goto fail; 776 } 777 if (gpp->gpp_codesize > sz) { 778 error = EFBIG; 779 goto fail; 780 } 781 782 error = G_PART_BOOTCODE(table, gpp); 783 if (error) 784 goto fail; 785 786 /* Provide feedback if so requested. */ 787 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 788 sb = sbuf_new_auto(); 789 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 790 sbuf_finish(sb); 791 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 792 sbuf_delete(sb); 793 } 794 return (0); 795 796 fail: 797 gctl_error(req, "%d", error); 798 return (error); 799 } 800 801 static int 802 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 803 { 804 struct g_consumer *cp; 805 struct g_geom *gp; 806 struct g_provider *pp; 807 struct g_part_entry *entry, *tmp; 808 struct g_part_table *table; 809 char *buf; 810 int error, i; 811 812 gp = gpp->gpp_geom; 813 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 814 g_topology_assert(); 815 816 table = gp->softc; 817 if (!table->gpt_opened) { 818 gctl_error(req, "%d", EPERM); 819 return (EPERM); 820 } 821 822 g_topology_unlock(); 823 824 cp = LIST_FIRST(&gp->consumer); 825 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 826 pp = cp->provider; 827 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 828 while (table->gpt_smhead != 0) { 829 i = ffs(table->gpt_smhead) - 1; 830 error = g_write_data(cp, i * pp->sectorsize, buf, 831 pp->sectorsize); 832 if (error) { 833 g_free(buf); 834 goto fail; 835 } 836 table->gpt_smhead &= ~(1 << i); 837 } 838 while (table->gpt_smtail != 0) { 839 i = ffs(table->gpt_smtail) - 1; 840 error = g_write_data(cp, pp->mediasize - (i + 1) * 841 pp->sectorsize, buf, pp->sectorsize); 842 if (error) { 843 g_free(buf); 844 goto fail; 845 } 846 table->gpt_smtail &= ~(1 << i); 847 } 848 g_free(buf); 849 } 850 851 if (table->gpt_scheme == &g_part_null_scheme) { 852 g_topology_lock(); 853 g_access(cp, -1, -1, -1); 854 g_part_wither(gp, ENXIO); 855 return (0); 856 } 857 858 error = G_PART_WRITE(table, cp); 859 if (error) 860 goto fail; 861 862 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 863 if (!entry->gpe_deleted) { 864 entry->gpe_created = 0; 865 entry->gpe_modified = 0; 866 continue; 867 } 868 LIST_REMOVE(entry, gpe_entry); 869 g_free(entry); 870 } 871 table->gpt_created = 0; 872 table->gpt_opened = 0; 873 874 g_topology_lock(); 875 g_access(cp, -1, -1, -1); 876 return (0); 877 878 fail: 879 g_topology_lock(); 880 gctl_error(req, "%d", error); 881 return (error); 882 } 883 884 static int 885 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 886 { 887 struct g_consumer *cp; 888 struct g_geom *gp; 889 struct g_provider *pp; 890 struct g_part_scheme *scheme; 891 struct g_part_table *null, *table; 892 struct sbuf *sb; 893 int attr, error; 894 895 pp = gpp->gpp_provider; 896 scheme = gpp->gpp_scheme; 897 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 898 g_topology_assert(); 899 900 /* Check that there isn't already a g_part geom on the provider. */ 901 gp = g_part_find_geom(pp->name); 902 if (gp != NULL) { 903 null = gp->softc; 904 if (null->gpt_scheme != &g_part_null_scheme) { 905 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 906 return (EEXIST); 907 } 908 } else 909 null = NULL; 910 911 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 912 (gpp->gpp_entries < scheme->gps_minent || 913 gpp->gpp_entries > scheme->gps_maxent)) { 914 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 915 return (EINVAL); 916 } 917 918 if (null == NULL) 919 gp = g_new_geomf(&g_part_class, "%s", pp->name); 920 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 921 M_WAITOK); 922 table = gp->softc; 923 table->gpt_gp = gp; 924 table->gpt_scheme = gpp->gpp_scheme; 925 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 926 gpp->gpp_entries : scheme->gps_minent; 927 LIST_INIT(&table->gpt_entry); 928 if (null == NULL) { 929 cp = g_new_consumer(gp); 930 error = g_attach(cp, pp); 931 if (error == 0) 932 error = g_access(cp, 1, 1, 1); 933 if (error != 0) { 934 g_part_wither(gp, error); 935 gctl_error(req, "%d geom '%s'", error, pp->name); 936 return (error); 937 } 938 table->gpt_opened = 1; 939 } else { 940 cp = LIST_FIRST(&gp->consumer); 941 table->gpt_opened = null->gpt_opened; 942 table->gpt_smhead = null->gpt_smhead; 943 table->gpt_smtail = null->gpt_smtail; 944 } 945 946 g_topology_unlock(); 947 948 /* Make sure the provider has media. */ 949 if (pp->mediasize == 0 || pp->sectorsize == 0) { 950 error = ENODEV; 951 goto fail; 952 } 953 954 /* Make sure we can nest and if so, determine our depth. */ 955 error = g_getattr("PART::isleaf", cp, &attr); 956 if (!error && attr) { 957 error = ENODEV; 958 goto fail; 959 } 960 error = g_getattr("PART::depth", cp, &attr); 961 table->gpt_depth = (!error) ? attr + 1 : 0; 962 963 /* 964 * Synthesize a disk geometry. Some partitioning schemes 965 * depend on it and since some file systems need it even 966 * when the partitition scheme doesn't, we do it here in 967 * scheme-independent code. 968 */ 969 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 970 971 error = G_PART_CREATE(table, gpp); 972 if (error) 973 goto fail; 974 975 g_topology_lock(); 976 977 table->gpt_created = 1; 978 if (null != NULL) 979 kobj_delete((kobj_t)null, M_GEOM); 980 981 /* 982 * Support automatic commit by filling in the gpp_geom 983 * parameter. 984 */ 985 gpp->gpp_parms |= G_PART_PARM_GEOM; 986 gpp->gpp_geom = gp; 987 988 /* Provide feedback if so requested. */ 989 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 990 sb = sbuf_new_auto(); 991 sbuf_printf(sb, "%s created\n", gp->name); 992 sbuf_finish(sb); 993 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 994 sbuf_delete(sb); 995 } 996 return (0); 997 998 fail: 999 g_topology_lock(); 1000 if (null == NULL) { 1001 g_access(cp, -1, -1, -1); 1002 g_part_wither(gp, error); 1003 } else { 1004 kobj_delete((kobj_t)gp->softc, M_GEOM); 1005 gp->softc = null; 1006 } 1007 gctl_error(req, "%d provider", error); 1008 return (error); 1009 } 1010 1011 static int 1012 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1013 { 1014 struct g_geom *gp; 1015 struct g_provider *pp; 1016 struct g_part_entry *entry; 1017 struct g_part_table *table; 1018 struct sbuf *sb; 1019 1020 gp = gpp->gpp_geom; 1021 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1022 g_topology_assert(); 1023 1024 table = gp->softc; 1025 1026 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1027 if (entry->gpe_deleted || entry->gpe_internal) 1028 continue; 1029 if (entry->gpe_index == gpp->gpp_index) 1030 break; 1031 } 1032 if (entry == NULL) { 1033 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1034 return (ENOENT); 1035 } 1036 1037 pp = entry->gpe_pp; 1038 if (pp != NULL) { 1039 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1040 gctl_error(req, "%d", EBUSY); 1041 return (EBUSY); 1042 } 1043 1044 pp->private = NULL; 1045 entry->gpe_pp = NULL; 1046 } 1047 1048 if (pp != NULL) 1049 g_wither_provider(pp, ENXIO); 1050 1051 /* Provide feedback if so requested. */ 1052 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1053 sb = sbuf_new_auto(); 1054 G_PART_FULLNAME(table, entry, sb, gp->name); 1055 sbuf_cat(sb, " deleted\n"); 1056 sbuf_finish(sb); 1057 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1058 sbuf_delete(sb); 1059 } 1060 1061 if (entry->gpe_created) { 1062 LIST_REMOVE(entry, gpe_entry); 1063 g_free(entry); 1064 } else { 1065 entry->gpe_modified = 0; 1066 entry->gpe_deleted = 1; 1067 } 1068 return (0); 1069 } 1070 1071 static int 1072 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1073 { 1074 struct g_consumer *cp; 1075 struct g_geom *gp; 1076 struct g_provider *pp; 1077 struct g_part_entry *entry, *tmp; 1078 struct g_part_table *null, *table; 1079 struct sbuf *sb; 1080 int error; 1081 1082 gp = gpp->gpp_geom; 1083 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1084 g_topology_assert(); 1085 1086 table = gp->softc; 1087 /* Check for busy providers. */ 1088 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1089 if (entry->gpe_deleted || entry->gpe_internal) 1090 continue; 1091 if (gpp->gpp_force) { 1092 pp = entry->gpe_pp; 1093 if (pp == NULL) 1094 continue; 1095 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1096 continue; 1097 } 1098 gctl_error(req, "%d", EBUSY); 1099 return (EBUSY); 1100 } 1101 1102 if (gpp->gpp_force) { 1103 /* Destroy all providers. */ 1104 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1105 pp = entry->gpe_pp; 1106 if (pp != NULL) { 1107 pp->private = NULL; 1108 g_wither_provider(pp, ENXIO); 1109 } 1110 LIST_REMOVE(entry, gpe_entry); 1111 g_free(entry); 1112 } 1113 } 1114 1115 error = G_PART_DESTROY(table, gpp); 1116 if (error) { 1117 gctl_error(req, "%d", error); 1118 return (error); 1119 } 1120 1121 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1122 M_WAITOK); 1123 null = gp->softc; 1124 null->gpt_gp = gp; 1125 null->gpt_scheme = &g_part_null_scheme; 1126 LIST_INIT(&null->gpt_entry); 1127 1128 cp = LIST_FIRST(&gp->consumer); 1129 pp = cp->provider; 1130 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1131 1132 null->gpt_depth = table->gpt_depth; 1133 null->gpt_opened = table->gpt_opened; 1134 null->gpt_smhead = table->gpt_smhead; 1135 null->gpt_smtail = table->gpt_smtail; 1136 1137 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1138 LIST_REMOVE(entry, gpe_entry); 1139 g_free(entry); 1140 } 1141 kobj_delete((kobj_t)table, M_GEOM); 1142 1143 /* Provide feedback if so requested. */ 1144 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1145 sb = sbuf_new_auto(); 1146 sbuf_printf(sb, "%s destroyed\n", gp->name); 1147 sbuf_finish(sb); 1148 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1149 sbuf_delete(sb); 1150 } 1151 return (0); 1152 } 1153 1154 static int 1155 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1156 { 1157 struct g_geom *gp; 1158 struct g_part_entry *entry; 1159 struct g_part_table *table; 1160 struct sbuf *sb; 1161 int error; 1162 1163 gp = gpp->gpp_geom; 1164 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1165 g_topology_assert(); 1166 1167 table = gp->softc; 1168 1169 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1170 if (entry->gpe_deleted || entry->gpe_internal) 1171 continue; 1172 if (entry->gpe_index == gpp->gpp_index) 1173 break; 1174 } 1175 if (entry == NULL) { 1176 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1177 return (ENOENT); 1178 } 1179 1180 error = G_PART_MODIFY(table, entry, gpp); 1181 if (error) { 1182 gctl_error(req, "%d", error); 1183 return (error); 1184 } 1185 1186 if (!entry->gpe_created) 1187 entry->gpe_modified = 1; 1188 1189 /* Provide feedback if so requested. */ 1190 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1191 sb = sbuf_new_auto(); 1192 G_PART_FULLNAME(table, entry, sb, gp->name); 1193 sbuf_cat(sb, " modified\n"); 1194 sbuf_finish(sb); 1195 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1196 sbuf_delete(sb); 1197 } 1198 return (0); 1199 } 1200 1201 static int 1202 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1203 { 1204 gctl_error(req, "%d verb 'move'", ENOSYS); 1205 return (ENOSYS); 1206 } 1207 1208 static int 1209 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1210 { 1211 struct g_part_table *table; 1212 struct g_geom *gp; 1213 struct sbuf *sb; 1214 int error, recovered; 1215 1216 gp = gpp->gpp_geom; 1217 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1218 g_topology_assert(); 1219 table = gp->softc; 1220 error = recovered = 0; 1221 1222 if (table->gpt_corrupt) { 1223 error = G_PART_RECOVER(table); 1224 if (error == 0) 1225 error = g_part_check_integrity(table, 1226 LIST_FIRST(&gp->consumer)); 1227 if (error) { 1228 gctl_error(req, "%d recovering '%s' failed", 1229 error, gp->name); 1230 return (error); 1231 } 1232 recovered = 1; 1233 } 1234 /* Provide feedback if so requested. */ 1235 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1236 sb = sbuf_new_auto(); 1237 if (recovered) 1238 sbuf_printf(sb, "%s recovered\n", gp->name); 1239 else 1240 sbuf_printf(sb, "%s recovering is not needed\n", 1241 gp->name); 1242 sbuf_finish(sb); 1243 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1244 sbuf_delete(sb); 1245 } 1246 return (0); 1247 } 1248 1249 static int 1250 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1251 { 1252 struct g_geom *gp; 1253 struct g_provider *pp; 1254 struct g_part_entry *pe, *entry; 1255 struct g_part_table *table; 1256 struct sbuf *sb; 1257 quad_t end; 1258 int error; 1259 off_t mediasize; 1260 1261 gp = gpp->gpp_geom; 1262 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1263 g_topology_assert(); 1264 table = gp->softc; 1265 1266 /* check gpp_index */ 1267 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1268 if (entry->gpe_deleted || entry->gpe_internal) 1269 continue; 1270 if (entry->gpe_index == gpp->gpp_index) 1271 break; 1272 } 1273 if (entry == NULL) { 1274 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1275 return (ENOENT); 1276 } 1277 1278 /* check gpp_size */ 1279 end = entry->gpe_start + gpp->gpp_size - 1; 1280 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1281 gctl_error(req, "%d size '%jd'", EINVAL, 1282 (intmax_t)gpp->gpp_size); 1283 return (EINVAL); 1284 } 1285 1286 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1287 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1288 continue; 1289 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1290 gctl_error(req, "%d end '%jd'", ENOSPC, 1291 (intmax_t)end); 1292 return (ENOSPC); 1293 } 1294 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1295 gctl_error(req, "%d size '%jd'", ENOSPC, 1296 (intmax_t)gpp->gpp_size); 1297 return (ENOSPC); 1298 } 1299 } 1300 1301 pp = entry->gpe_pp; 1302 if ((g_debugflags & 16) == 0 && 1303 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1304 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) { 1305 /* Deny shrinking of an opened partition. */ 1306 gctl_error(req, "%d", EBUSY); 1307 return (EBUSY); 1308 } 1309 } 1310 1311 error = G_PART_RESIZE(table, entry, gpp); 1312 if (error) { 1313 gctl_error(req, "%d", error); 1314 return (error); 1315 } 1316 1317 if (!entry->gpe_created) 1318 entry->gpe_modified = 1; 1319 1320 /* update mediasize of changed provider */ 1321 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1322 pp->sectorsize; 1323 g_resize_provider(pp, mediasize); 1324 1325 /* Provide feedback if so requested. */ 1326 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1327 sb = sbuf_new_auto(); 1328 G_PART_FULLNAME(table, entry, sb, gp->name); 1329 sbuf_cat(sb, " resized\n"); 1330 sbuf_finish(sb); 1331 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1332 sbuf_delete(sb); 1333 } 1334 return (0); 1335 } 1336 1337 static int 1338 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1339 unsigned int set) 1340 { 1341 struct g_geom *gp; 1342 struct g_part_entry *entry; 1343 struct g_part_table *table; 1344 struct sbuf *sb; 1345 int error; 1346 1347 gp = gpp->gpp_geom; 1348 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1349 g_topology_assert(); 1350 1351 table = gp->softc; 1352 1353 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1354 if (entry->gpe_deleted || entry->gpe_internal) 1355 continue; 1356 if (entry->gpe_index == gpp->gpp_index) 1357 break; 1358 } 1359 if (entry == NULL) { 1360 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1361 return (ENOENT); 1362 } 1363 1364 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1365 if (error) { 1366 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1367 return (error); 1368 } 1369 1370 /* Provide feedback if so requested. */ 1371 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1372 sb = sbuf_new_auto(); 1373 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1374 (set) ? "" : "un"); 1375 G_PART_FULLNAME(table, entry, sb, gp->name); 1376 sbuf_printf(sb, "\n"); 1377 sbuf_finish(sb); 1378 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1379 sbuf_delete(sb); 1380 } 1381 return (0); 1382 } 1383 1384 static int 1385 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1386 { 1387 struct g_consumer *cp; 1388 struct g_provider *pp; 1389 struct g_geom *gp; 1390 struct g_part_entry *entry, *tmp; 1391 struct g_part_table *table; 1392 int error, reprobe; 1393 1394 gp = gpp->gpp_geom; 1395 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1396 g_topology_assert(); 1397 1398 table = gp->softc; 1399 if (!table->gpt_opened) { 1400 gctl_error(req, "%d", EPERM); 1401 return (EPERM); 1402 } 1403 1404 cp = LIST_FIRST(&gp->consumer); 1405 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1406 entry->gpe_modified = 0; 1407 if (entry->gpe_created) { 1408 pp = entry->gpe_pp; 1409 if (pp != NULL) { 1410 pp->private = NULL; 1411 entry->gpe_pp = NULL; 1412 g_wither_provider(pp, ENXIO); 1413 } 1414 entry->gpe_deleted = 1; 1415 } 1416 if (entry->gpe_deleted) { 1417 LIST_REMOVE(entry, gpe_entry); 1418 g_free(entry); 1419 } 1420 } 1421 1422 g_topology_unlock(); 1423 1424 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1425 table->gpt_created) ? 1 : 0; 1426 1427 if (reprobe) { 1428 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1429 if (entry->gpe_internal) 1430 continue; 1431 error = EBUSY; 1432 goto fail; 1433 } 1434 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1435 LIST_REMOVE(entry, gpe_entry); 1436 g_free(entry); 1437 } 1438 error = g_part_probe(gp, cp, table->gpt_depth); 1439 if (error) { 1440 g_topology_lock(); 1441 g_access(cp, -1, -1, -1); 1442 g_part_wither(gp, error); 1443 return (0); 1444 } 1445 table = gp->softc; 1446 1447 /* 1448 * Synthesize a disk geometry. Some partitioning schemes 1449 * depend on it and since some file systems need it even 1450 * when the partitition scheme doesn't, we do it here in 1451 * scheme-independent code. 1452 */ 1453 pp = cp->provider; 1454 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1455 } 1456 1457 error = G_PART_READ(table, cp); 1458 if (error) 1459 goto fail; 1460 error = g_part_check_integrity(table, cp); 1461 if (error) 1462 goto fail; 1463 1464 g_topology_lock(); 1465 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1466 if (!entry->gpe_internal) 1467 g_part_new_provider(gp, table, entry); 1468 } 1469 1470 table->gpt_opened = 0; 1471 g_access(cp, -1, -1, -1); 1472 return (0); 1473 1474 fail: 1475 g_topology_lock(); 1476 gctl_error(req, "%d", error); 1477 return (error); 1478 } 1479 1480 static void 1481 g_part_wither(struct g_geom *gp, int error) 1482 { 1483 struct g_part_entry *entry; 1484 struct g_part_table *table; 1485 1486 table = gp->softc; 1487 if (table != NULL) { 1488 G_PART_DESTROY(table, NULL); 1489 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1490 LIST_REMOVE(entry, gpe_entry); 1491 g_free(entry); 1492 } 1493 if (gp->softc != NULL) { 1494 kobj_delete((kobj_t)gp->softc, M_GEOM); 1495 gp->softc = NULL; 1496 } 1497 } 1498 g_wither_geom(gp, error); 1499 } 1500 1501 /* 1502 * Class methods. 1503 */ 1504 1505 static void 1506 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1507 { 1508 struct g_part_parms gpp; 1509 struct g_part_table *table; 1510 struct gctl_req_arg *ap; 1511 enum g_part_ctl ctlreq; 1512 unsigned int i, mparms, oparms, parm; 1513 int auto_commit, close_on_error; 1514 int error, modifies; 1515 1516 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1517 g_topology_assert(); 1518 1519 ctlreq = G_PART_CTL_NONE; 1520 modifies = 1; 1521 mparms = 0; 1522 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1523 switch (*verb) { 1524 case 'a': 1525 if (!strcmp(verb, "add")) { 1526 ctlreq = G_PART_CTL_ADD; 1527 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1528 G_PART_PARM_START | G_PART_PARM_TYPE; 1529 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1530 } 1531 break; 1532 case 'b': 1533 if (!strcmp(verb, "bootcode")) { 1534 ctlreq = G_PART_CTL_BOOTCODE; 1535 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1536 } 1537 break; 1538 case 'c': 1539 if (!strcmp(verb, "commit")) { 1540 ctlreq = G_PART_CTL_COMMIT; 1541 mparms |= G_PART_PARM_GEOM; 1542 modifies = 0; 1543 } else if (!strcmp(verb, "create")) { 1544 ctlreq = G_PART_CTL_CREATE; 1545 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1546 oparms |= G_PART_PARM_ENTRIES; 1547 } 1548 break; 1549 case 'd': 1550 if (!strcmp(verb, "delete")) { 1551 ctlreq = G_PART_CTL_DELETE; 1552 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1553 } else if (!strcmp(verb, "destroy")) { 1554 ctlreq = G_PART_CTL_DESTROY; 1555 mparms |= G_PART_PARM_GEOM; 1556 oparms |= G_PART_PARM_FORCE; 1557 } 1558 break; 1559 case 'm': 1560 if (!strcmp(verb, "modify")) { 1561 ctlreq = G_PART_CTL_MODIFY; 1562 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1563 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1564 } else if (!strcmp(verb, "move")) { 1565 ctlreq = G_PART_CTL_MOVE; 1566 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1567 } 1568 break; 1569 case 'r': 1570 if (!strcmp(verb, "recover")) { 1571 ctlreq = G_PART_CTL_RECOVER; 1572 mparms |= G_PART_PARM_GEOM; 1573 } else if (!strcmp(verb, "resize")) { 1574 ctlreq = G_PART_CTL_RESIZE; 1575 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1576 G_PART_PARM_SIZE; 1577 } 1578 break; 1579 case 's': 1580 if (!strcmp(verb, "set")) { 1581 ctlreq = G_PART_CTL_SET; 1582 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1583 G_PART_PARM_INDEX; 1584 } 1585 break; 1586 case 'u': 1587 if (!strcmp(verb, "undo")) { 1588 ctlreq = G_PART_CTL_UNDO; 1589 mparms |= G_PART_PARM_GEOM; 1590 modifies = 0; 1591 } else if (!strcmp(verb, "unset")) { 1592 ctlreq = G_PART_CTL_UNSET; 1593 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1594 G_PART_PARM_INDEX; 1595 } 1596 break; 1597 } 1598 if (ctlreq == G_PART_CTL_NONE) { 1599 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1600 return; 1601 } 1602 1603 bzero(&gpp, sizeof(gpp)); 1604 for (i = 0; i < req->narg; i++) { 1605 ap = &req->arg[i]; 1606 parm = 0; 1607 switch (ap->name[0]) { 1608 case 'a': 1609 if (!strcmp(ap->name, "arg0")) { 1610 parm = mparms & 1611 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1612 } 1613 if (!strcmp(ap->name, "attrib")) 1614 parm = G_PART_PARM_ATTRIB; 1615 break; 1616 case 'b': 1617 if (!strcmp(ap->name, "bootcode")) 1618 parm = G_PART_PARM_BOOTCODE; 1619 break; 1620 case 'c': 1621 if (!strcmp(ap->name, "class")) 1622 continue; 1623 break; 1624 case 'e': 1625 if (!strcmp(ap->name, "entries")) 1626 parm = G_PART_PARM_ENTRIES; 1627 break; 1628 case 'f': 1629 if (!strcmp(ap->name, "flags")) 1630 parm = G_PART_PARM_FLAGS; 1631 else if (!strcmp(ap->name, "force")) 1632 parm = G_PART_PARM_FORCE; 1633 break; 1634 case 'i': 1635 if (!strcmp(ap->name, "index")) 1636 parm = G_PART_PARM_INDEX; 1637 break; 1638 case 'l': 1639 if (!strcmp(ap->name, "label")) 1640 parm = G_PART_PARM_LABEL; 1641 break; 1642 case 'o': 1643 if (!strcmp(ap->name, "output")) 1644 parm = G_PART_PARM_OUTPUT; 1645 break; 1646 case 's': 1647 if (!strcmp(ap->name, "scheme")) 1648 parm = G_PART_PARM_SCHEME; 1649 else if (!strcmp(ap->name, "size")) 1650 parm = G_PART_PARM_SIZE; 1651 else if (!strcmp(ap->name, "start")) 1652 parm = G_PART_PARM_START; 1653 break; 1654 case 't': 1655 if (!strcmp(ap->name, "type")) 1656 parm = G_PART_PARM_TYPE; 1657 break; 1658 case 'v': 1659 if (!strcmp(ap->name, "verb")) 1660 continue; 1661 else if (!strcmp(ap->name, "version")) 1662 parm = G_PART_PARM_VERSION; 1663 break; 1664 } 1665 if ((parm & (mparms | oparms)) == 0) { 1666 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1667 return; 1668 } 1669 switch (parm) { 1670 case G_PART_PARM_ATTRIB: 1671 error = g_part_parm_str(req, ap->name, 1672 &gpp.gpp_attrib); 1673 break; 1674 case G_PART_PARM_BOOTCODE: 1675 error = g_part_parm_bootcode(req, ap->name, 1676 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1677 break; 1678 case G_PART_PARM_ENTRIES: 1679 error = g_part_parm_intmax(req, ap->name, 1680 &gpp.gpp_entries); 1681 break; 1682 case G_PART_PARM_FLAGS: 1683 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1684 break; 1685 case G_PART_PARM_FORCE: 1686 error = g_part_parm_uint32(req, ap->name, 1687 &gpp.gpp_force); 1688 break; 1689 case G_PART_PARM_GEOM: 1690 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1691 break; 1692 case G_PART_PARM_INDEX: 1693 error = g_part_parm_intmax(req, ap->name, 1694 &gpp.gpp_index); 1695 break; 1696 case G_PART_PARM_LABEL: 1697 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1698 break; 1699 case G_PART_PARM_OUTPUT: 1700 error = 0; /* Write-only parameter */ 1701 break; 1702 case G_PART_PARM_PROVIDER: 1703 error = g_part_parm_provider(req, ap->name, 1704 &gpp.gpp_provider); 1705 break; 1706 case G_PART_PARM_SCHEME: 1707 error = g_part_parm_scheme(req, ap->name, 1708 &gpp.gpp_scheme); 1709 break; 1710 case G_PART_PARM_SIZE: 1711 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1712 break; 1713 case G_PART_PARM_START: 1714 error = g_part_parm_quad(req, ap->name, 1715 &gpp.gpp_start); 1716 break; 1717 case G_PART_PARM_TYPE: 1718 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1719 break; 1720 case G_PART_PARM_VERSION: 1721 error = g_part_parm_uint32(req, ap->name, 1722 &gpp.gpp_version); 1723 break; 1724 default: 1725 error = EDOOFUS; 1726 gctl_error(req, "%d %s", error, ap->name); 1727 break; 1728 } 1729 if (error != 0) { 1730 if (error == ENOATTR) { 1731 gctl_error(req, "%d param '%s'", error, 1732 ap->name); 1733 } 1734 return; 1735 } 1736 gpp.gpp_parms |= parm; 1737 } 1738 if ((gpp.gpp_parms & mparms) != mparms) { 1739 parm = mparms - (gpp.gpp_parms & mparms); 1740 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1741 return; 1742 } 1743 1744 /* Obtain permissions if possible/necessary. */ 1745 close_on_error = 0; 1746 table = NULL; 1747 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1748 table = gpp.gpp_geom->softc; 1749 if (table != NULL && table->gpt_corrupt && 1750 ctlreq != G_PART_CTL_DESTROY && 1751 ctlreq != G_PART_CTL_RECOVER) { 1752 gctl_error(req, "%d table '%s' is corrupt", 1753 EPERM, gpp.gpp_geom->name); 1754 return; 1755 } 1756 if (table != NULL && !table->gpt_opened) { 1757 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1758 1, 1, 1); 1759 if (error) { 1760 gctl_error(req, "%d geom '%s'", error, 1761 gpp.gpp_geom->name); 1762 return; 1763 } 1764 table->gpt_opened = 1; 1765 close_on_error = 1; 1766 } 1767 } 1768 1769 /* Allow the scheme to check or modify the parameters. */ 1770 if (table != NULL) { 1771 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1772 if (error) { 1773 gctl_error(req, "%d pre-check failed", error); 1774 goto out; 1775 } 1776 } else 1777 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1778 1779 switch (ctlreq) { 1780 case G_PART_CTL_NONE: 1781 panic("%s", __func__); 1782 case G_PART_CTL_ADD: 1783 error = g_part_ctl_add(req, &gpp); 1784 break; 1785 case G_PART_CTL_BOOTCODE: 1786 error = g_part_ctl_bootcode(req, &gpp); 1787 break; 1788 case G_PART_CTL_COMMIT: 1789 error = g_part_ctl_commit(req, &gpp); 1790 break; 1791 case G_PART_CTL_CREATE: 1792 error = g_part_ctl_create(req, &gpp); 1793 break; 1794 case G_PART_CTL_DELETE: 1795 error = g_part_ctl_delete(req, &gpp); 1796 break; 1797 case G_PART_CTL_DESTROY: 1798 error = g_part_ctl_destroy(req, &gpp); 1799 break; 1800 case G_PART_CTL_MODIFY: 1801 error = g_part_ctl_modify(req, &gpp); 1802 break; 1803 case G_PART_CTL_MOVE: 1804 error = g_part_ctl_move(req, &gpp); 1805 break; 1806 case G_PART_CTL_RECOVER: 1807 error = g_part_ctl_recover(req, &gpp); 1808 break; 1809 case G_PART_CTL_RESIZE: 1810 error = g_part_ctl_resize(req, &gpp); 1811 break; 1812 case G_PART_CTL_SET: 1813 error = g_part_ctl_setunset(req, &gpp, 1); 1814 break; 1815 case G_PART_CTL_UNDO: 1816 error = g_part_ctl_undo(req, &gpp); 1817 break; 1818 case G_PART_CTL_UNSET: 1819 error = g_part_ctl_setunset(req, &gpp, 0); 1820 break; 1821 } 1822 1823 /* Implement automatic commit. */ 1824 if (!error) { 1825 auto_commit = (modifies && 1826 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1827 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1828 if (auto_commit) { 1829 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1830 __func__)); 1831 error = g_part_ctl_commit(req, &gpp); 1832 } 1833 } 1834 1835 out: 1836 if (error && close_on_error) { 1837 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1838 table->gpt_opened = 0; 1839 } 1840 } 1841 1842 static int 1843 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1844 struct g_geom *gp) 1845 { 1846 1847 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1848 g_topology_assert(); 1849 1850 g_part_wither(gp, EINVAL); 1851 return (0); 1852 } 1853 1854 static struct g_geom * 1855 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1856 { 1857 struct g_consumer *cp; 1858 struct g_geom *gp; 1859 struct g_part_entry *entry; 1860 struct g_part_table *table; 1861 struct root_hold_token *rht; 1862 int attr, depth; 1863 int error; 1864 1865 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1866 g_topology_assert(); 1867 1868 /* Skip providers that are already open for writing. */ 1869 if (pp->acw > 0) 1870 return (NULL); 1871 1872 /* 1873 * Create a GEOM with consumer and hook it up to the provider. 1874 * With that we become part of the topology. Optain read access 1875 * to the provider. 1876 */ 1877 gp = g_new_geomf(mp, "%s", pp->name); 1878 cp = g_new_consumer(gp); 1879 error = g_attach(cp, pp); 1880 if (error == 0) 1881 error = g_access(cp, 1, 0, 0); 1882 if (error != 0) { 1883 if (cp->provider) 1884 g_detach(cp); 1885 g_destroy_consumer(cp); 1886 g_destroy_geom(gp); 1887 return (NULL); 1888 } 1889 1890 rht = root_mount_hold(mp->name); 1891 g_topology_unlock(); 1892 1893 /* 1894 * Short-circuit the whole probing galore when there's no 1895 * media present. 1896 */ 1897 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1898 error = ENODEV; 1899 goto fail; 1900 } 1901 1902 /* Make sure we can nest and if so, determine our depth. */ 1903 error = g_getattr("PART::isleaf", cp, &attr); 1904 if (!error && attr) { 1905 error = ENODEV; 1906 goto fail; 1907 } 1908 error = g_getattr("PART::depth", cp, &attr); 1909 depth = (!error) ? attr + 1 : 0; 1910 1911 error = g_part_probe(gp, cp, depth); 1912 if (error) 1913 goto fail; 1914 1915 table = gp->softc; 1916 1917 /* 1918 * Synthesize a disk geometry. Some partitioning schemes 1919 * depend on it and since some file systems need it even 1920 * when the partitition scheme doesn't, we do it here in 1921 * scheme-independent code. 1922 */ 1923 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1924 1925 error = G_PART_READ(table, cp); 1926 if (error) 1927 goto fail; 1928 error = g_part_check_integrity(table, cp); 1929 if (error) 1930 goto fail; 1931 1932 g_topology_lock(); 1933 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1934 if (!entry->gpe_internal) 1935 g_part_new_provider(gp, table, entry); 1936 } 1937 1938 root_mount_rel(rht); 1939 g_access(cp, -1, 0, 0); 1940 return (gp); 1941 1942 fail: 1943 g_topology_lock(); 1944 root_mount_rel(rht); 1945 g_access(cp, -1, 0, 0); 1946 g_detach(cp); 1947 g_destroy_consumer(cp); 1948 g_destroy_geom(gp); 1949 return (NULL); 1950 } 1951 1952 /* 1953 * Geom methods. 1954 */ 1955 1956 static int 1957 g_part_access(struct g_provider *pp, int dr, int dw, int de) 1958 { 1959 struct g_consumer *cp; 1960 1961 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1962 dw, de)); 1963 1964 cp = LIST_FIRST(&pp->geom->consumer); 1965 1966 /* We always gain write-exclusive access. */ 1967 return (g_access(cp, dr, dw, dw + de)); 1968 } 1969 1970 static void 1971 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1972 struct g_consumer *cp, struct g_provider *pp) 1973 { 1974 char buf[64]; 1975 struct g_part_entry *entry; 1976 struct g_part_table *table; 1977 1978 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 1979 table = gp->softc; 1980 1981 if (indent == NULL) { 1982 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 1983 entry = pp->private; 1984 if (entry == NULL) 1985 return; 1986 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 1987 (uintmax_t)entry->gpe_offset, 1988 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1989 /* 1990 * libdisk compatibility quirk - the scheme dumps the 1991 * slicer name and partition type in a way that is 1992 * compatible with libdisk. When libdisk is not used 1993 * anymore, this should go away. 1994 */ 1995 G_PART_DUMPCONF(table, entry, sb, indent); 1996 } else if (cp != NULL) { /* Consumer configuration. */ 1997 KASSERT(pp == NULL, ("%s", __func__)); 1998 /* none */ 1999 } else if (pp != NULL) { /* Provider configuration. */ 2000 entry = pp->private; 2001 if (entry == NULL) 2002 return; 2003 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2004 (uintmax_t)entry->gpe_start); 2005 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2006 (uintmax_t)entry->gpe_end); 2007 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2008 entry->gpe_index); 2009 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2010 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2011 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2012 (uintmax_t)entry->gpe_offset); 2013 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2014 (uintmax_t)pp->mediasize); 2015 G_PART_DUMPCONF(table, entry, sb, indent); 2016 } else { /* Geom configuration. */ 2017 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2018 table->gpt_scheme->name); 2019 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2020 table->gpt_entries); 2021 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2022 (uintmax_t)table->gpt_first); 2023 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2024 (uintmax_t)table->gpt_last); 2025 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2026 table->gpt_sectors); 2027 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2028 table->gpt_heads); 2029 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2030 table->gpt_corrupt ? "CORRUPT": "OK"); 2031 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2032 table->gpt_opened ? "true": "false"); 2033 G_PART_DUMPCONF(table, NULL, sb, indent); 2034 } 2035 } 2036 2037 static void 2038 g_part_orphan(struct g_consumer *cp) 2039 { 2040 struct g_provider *pp; 2041 struct g_part_table *table; 2042 2043 pp = cp->provider; 2044 KASSERT(pp != NULL, ("%s", __func__)); 2045 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2046 g_topology_assert(); 2047 2048 KASSERT(pp->error != 0, ("%s", __func__)); 2049 table = cp->geom->softc; 2050 if (table != NULL && table->gpt_opened) 2051 g_access(cp, -1, -1, -1); 2052 g_part_wither(cp->geom, pp->error); 2053 } 2054 2055 static void 2056 g_part_spoiled(struct g_consumer *cp) 2057 { 2058 2059 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2060 g_topology_assert(); 2061 2062 cp->flags |= G_CF_ORPHAN; 2063 g_part_wither(cp->geom, ENXIO); 2064 } 2065 2066 static void 2067 g_part_start(struct bio *bp) 2068 { 2069 struct bio *bp2; 2070 struct g_consumer *cp; 2071 struct g_geom *gp; 2072 struct g_part_entry *entry; 2073 struct g_part_table *table; 2074 struct g_kerneldump *gkd; 2075 struct g_provider *pp; 2076 char buf[64]; 2077 2078 pp = bp->bio_to; 2079 gp = pp->geom; 2080 table = gp->softc; 2081 cp = LIST_FIRST(&gp->consumer); 2082 2083 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2084 pp->name)); 2085 2086 entry = pp->private; 2087 if (entry == NULL) { 2088 g_io_deliver(bp, ENXIO); 2089 return; 2090 } 2091 2092 switch(bp->bio_cmd) { 2093 case BIO_DELETE: 2094 case BIO_READ: 2095 case BIO_WRITE: 2096 if (bp->bio_offset >= pp->mediasize) { 2097 g_io_deliver(bp, EIO); 2098 return; 2099 } 2100 bp2 = g_clone_bio(bp); 2101 if (bp2 == NULL) { 2102 g_io_deliver(bp, ENOMEM); 2103 return; 2104 } 2105 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2106 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2107 bp2->bio_done = g_std_done; 2108 bp2->bio_offset += entry->gpe_offset; 2109 g_io_request(bp2, cp); 2110 return; 2111 case BIO_FLUSH: 2112 break; 2113 case BIO_GETATTR: 2114 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2115 return; 2116 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2117 return; 2118 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2119 return; 2120 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2121 return; 2122 if (g_handleattr_str(bp, "PART::scheme", 2123 table->gpt_scheme->name)) 2124 return; 2125 if (g_handleattr_str(bp, "PART::type", 2126 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2127 return; 2128 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2129 /* 2130 * Check that the partition is suitable for kernel 2131 * dumps. Typically only swap partitions should be 2132 * used. If the request comes from the nested scheme 2133 * we allow dumping there as well. 2134 */ 2135 if ((bp->bio_from == NULL || 2136 bp->bio_from->geom->class != &g_part_class) && 2137 G_PART_DUMPTO(table, entry) == 0) { 2138 g_io_deliver(bp, ENODEV); 2139 printf("GEOM_PART: Partition '%s' not suitable" 2140 " for kernel dumps (wrong type?)\n", 2141 pp->name); 2142 return; 2143 } 2144 gkd = (struct g_kerneldump *)bp->bio_data; 2145 if (gkd->offset >= pp->mediasize) { 2146 g_io_deliver(bp, EIO); 2147 return; 2148 } 2149 if (gkd->offset + gkd->length > pp->mediasize) 2150 gkd->length = pp->mediasize - gkd->offset; 2151 gkd->offset += entry->gpe_offset; 2152 } 2153 break; 2154 default: 2155 g_io_deliver(bp, EOPNOTSUPP); 2156 return; 2157 } 2158 2159 bp2 = g_clone_bio(bp); 2160 if (bp2 == NULL) { 2161 g_io_deliver(bp, ENOMEM); 2162 return; 2163 } 2164 bp2->bio_done = g_std_done; 2165 g_io_request(bp2, cp); 2166 } 2167 2168 static void 2169 g_part_init(struct g_class *mp) 2170 { 2171 2172 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2173 } 2174 2175 static void 2176 g_part_fini(struct g_class *mp) 2177 { 2178 2179 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2180 } 2181 2182 static void 2183 g_part_unload_event(void *arg, int flag) 2184 { 2185 struct g_consumer *cp; 2186 struct g_geom *gp; 2187 struct g_provider *pp; 2188 struct g_part_scheme *scheme; 2189 struct g_part_table *table; 2190 uintptr_t *xchg; 2191 int acc, error; 2192 2193 if (flag == EV_CANCEL) 2194 return; 2195 2196 xchg = arg; 2197 error = 0; 2198 scheme = (void *)(*xchg); 2199 2200 g_topology_assert(); 2201 2202 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2203 table = gp->softc; 2204 if (table->gpt_scheme != scheme) 2205 continue; 2206 2207 acc = 0; 2208 LIST_FOREACH(pp, &gp->provider, provider) 2209 acc += pp->acr + pp->acw + pp->ace; 2210 LIST_FOREACH(cp, &gp->consumer, consumer) 2211 acc += cp->acr + cp->acw + cp->ace; 2212 2213 if (!acc) 2214 g_part_wither(gp, ENOSYS); 2215 else 2216 error = EBUSY; 2217 } 2218 2219 if (!error) 2220 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2221 2222 *xchg = error; 2223 } 2224 2225 int 2226 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2227 { 2228 struct g_part_scheme *iter; 2229 uintptr_t arg; 2230 int error; 2231 2232 error = 0; 2233 switch (type) { 2234 case MOD_LOAD: 2235 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2236 if (scheme == iter) { 2237 printf("GEOM_PART: scheme %s is already " 2238 "registered!\n", scheme->name); 2239 break; 2240 } 2241 } 2242 if (iter == NULL) { 2243 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2244 scheme_list); 2245 g_retaste(&g_part_class); 2246 } 2247 break; 2248 case MOD_UNLOAD: 2249 arg = (uintptr_t)scheme; 2250 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2251 NULL); 2252 if (error == 0) 2253 error = arg; 2254 break; 2255 default: 2256 error = EOPNOTSUPP; 2257 break; 2258 } 2259 2260 return (error); 2261 } 2262