1 /*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/diskmbr.h> 33 #include <sys/endian.h> 34 #include <sys/kernel.h> 35 #include <sys/kobj.h> 36 #include <sys/limits.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/mutex.h> 40 #include <sys/queue.h> 41 #include <sys/sbuf.h> 42 #include <sys/sysctl.h> 43 #include <sys/systm.h> 44 #include <sys/uuid.h> 45 #include <geom/geom.h> 46 #include <geom/geom_ctl.h> 47 #include <geom/geom_int.h> 48 #include <geom/part/g_part.h> 49 50 #include "g_part_if.h" 51 52 #ifndef _PATH_DEV 53 #define _PATH_DEV "/dev/" 54 #endif 55 56 static kobj_method_t g_part_null_methods[] = { 57 { 0, 0 } 58 }; 59 60 static struct g_part_scheme g_part_null_scheme = { 61 "(none)", 62 g_part_null_methods, 63 sizeof(struct g_part_table), 64 }; 65 66 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 67 TAILQ_HEAD_INITIALIZER(g_part_schemes); 68 69 struct g_part_alias_list { 70 const char *lexeme; 71 enum g_part_alias alias; 72 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 73 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 74 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 75 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 76 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 77 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 78 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 79 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 81 { "ebr", G_PART_ALIAS_EBR }, 82 { "efi", G_PART_ALIAS_EFI }, 83 { "fat32", G_PART_ALIAS_MS_FAT32 }, 84 { "freebsd", G_PART_ALIAS_FREEBSD }, 85 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 86 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 87 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 88 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 89 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 90 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 91 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 92 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 93 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 94 { "mbr", G_PART_ALIAS_MBR }, 95 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 96 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 97 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 98 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 99 { "ntfs", G_PART_ALIAS_MS_NTFS }, 100 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 101 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 102 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 103 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 104 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 105 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 106 }; 107 108 SYSCTL_DECL(_kern_geom); 109 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, 110 "GEOM_PART stuff"); 111 static u_int check_integrity = 1; 112 TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity); 113 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 114 CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1, 115 "Enable integrity checking"); 116 117 /* 118 * The GEOM partitioning class. 119 */ 120 static g_ctl_req_t g_part_ctlreq; 121 static g_ctl_destroy_geom_t g_part_destroy_geom; 122 static g_fini_t g_part_fini; 123 static g_init_t g_part_init; 124 static g_taste_t g_part_taste; 125 126 static g_access_t g_part_access; 127 static g_dumpconf_t g_part_dumpconf; 128 static g_orphan_t g_part_orphan; 129 static g_spoiled_t g_part_spoiled; 130 static g_start_t g_part_start; 131 132 static struct g_class g_part_class = { 133 .name = "PART", 134 .version = G_VERSION, 135 /* Class methods. */ 136 .ctlreq = g_part_ctlreq, 137 .destroy_geom = g_part_destroy_geom, 138 .fini = g_part_fini, 139 .init = g_part_init, 140 .taste = g_part_taste, 141 /* Geom methods. */ 142 .access = g_part_access, 143 .dumpconf = g_part_dumpconf, 144 .orphan = g_part_orphan, 145 .spoiled = g_part_spoiled, 146 .start = g_part_start, 147 }; 148 149 DECLARE_GEOM_CLASS(g_part_class, g_part); 150 MODULE_VERSION(g_part, 0); 151 152 /* 153 * Support functions. 154 */ 155 156 static void g_part_wither(struct g_geom *, int); 157 158 const char * 159 g_part_alias_name(enum g_part_alias alias) 160 { 161 int i; 162 163 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 164 if (g_part_alias_list[i].alias != alias) 165 continue; 166 return (g_part_alias_list[i].lexeme); 167 } 168 169 return (NULL); 170 } 171 172 void 173 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 174 u_int *bestheads) 175 { 176 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 177 off_t chs, cylinders; 178 u_int heads; 179 int idx; 180 181 *bestchs = 0; 182 *bestheads = 0; 183 for (idx = 0; candidate_heads[idx] != 0; idx++) { 184 heads = candidate_heads[idx]; 185 cylinders = blocks / heads / sectors; 186 if (cylinders < heads || cylinders < sectors) 187 break; 188 if (cylinders > 1023) 189 continue; 190 chs = cylinders * heads * sectors; 191 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 192 *bestchs = chs; 193 *bestheads = heads; 194 } 195 } 196 } 197 198 static void 199 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 200 off_t blocks) 201 { 202 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 203 off_t chs, bestchs; 204 u_int heads, sectors; 205 int idx; 206 207 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 208 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 209 table->gpt_fixgeom = 0; 210 table->gpt_heads = 0; 211 table->gpt_sectors = 0; 212 bestchs = 0; 213 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 214 sectors = candidate_sectors[idx]; 215 g_part_geometry_heads(blocks, sectors, &chs, &heads); 216 if (chs == 0) 217 continue; 218 /* 219 * Prefer a geometry with sectors > 1, but only if 220 * it doesn't bump down the number of heads to 1. 221 */ 222 if (chs > bestchs || (chs == bestchs && heads > 1 && 223 table->gpt_sectors == 1)) { 224 bestchs = chs; 225 table->gpt_heads = heads; 226 table->gpt_sectors = sectors; 227 } 228 } 229 /* 230 * If we didn't find a geometry at all, then the disk is 231 * too big. This means we can use the maximum number of 232 * heads and sectors. 233 */ 234 if (bestchs == 0) { 235 table->gpt_heads = 255; 236 table->gpt_sectors = 63; 237 } 238 } else { 239 table->gpt_fixgeom = 1; 240 table->gpt_heads = heads; 241 table->gpt_sectors = sectors; 242 } 243 } 244 245 #define DPRINTF(...) if (bootverbose) { \ 246 printf("GEOM_PART: " __VA_ARGS__); \ 247 } 248 249 static int 250 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 251 { 252 struct g_part_entry *e1, *e2; 253 struct g_provider *pp; 254 off_t offset; 255 int failed; 256 257 failed = 0; 258 pp = cp->provider; 259 if (table->gpt_last < table->gpt_first) { 260 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 261 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 262 failed++; 263 } 264 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 265 DPRINTF("last LBA extends beyond mediasize: " 266 "%jd > %jd\n", (intmax_t)table->gpt_last, 267 (intmax_t)pp->mediasize / pp->sectorsize - 1); 268 failed++; 269 } 270 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 271 if (e1->gpe_deleted || e1->gpe_internal) 272 continue; 273 if (e1->gpe_start < table->gpt_first) { 274 DPRINTF("partition %d has start offset below first " 275 "LBA: %jd < %jd\n", e1->gpe_index, 276 (intmax_t)e1->gpe_start, 277 (intmax_t)table->gpt_first); 278 failed++; 279 } 280 if (e1->gpe_start > table->gpt_last) { 281 DPRINTF("partition %d has start offset beyond last " 282 "LBA: %jd > %jd\n", e1->gpe_index, 283 (intmax_t)e1->gpe_start, 284 (intmax_t)table->gpt_last); 285 failed++; 286 } 287 if (e1->gpe_end < e1->gpe_start) { 288 DPRINTF("partition %d has end offset below start " 289 "offset: %jd < %jd\n", e1->gpe_index, 290 (intmax_t)e1->gpe_end, 291 (intmax_t)e1->gpe_start); 292 failed++; 293 } 294 if (e1->gpe_end > table->gpt_last) { 295 DPRINTF("partition %d has end offset beyond last " 296 "LBA: %jd > %jd\n", e1->gpe_index, 297 (intmax_t)e1->gpe_end, 298 (intmax_t)table->gpt_last); 299 failed++; 300 } 301 if (pp->stripesize > 0) { 302 offset = e1->gpe_start * pp->sectorsize; 303 if (e1->gpe_offset > offset) 304 offset = e1->gpe_offset; 305 if ((offset + pp->stripeoffset) % pp->stripesize) { 306 DPRINTF("partition %d is not aligned on %u " 307 "bytes\n", e1->gpe_index, pp->stripesize); 308 /* Don't treat this as a critical failure */ 309 } 310 } 311 e2 = e1; 312 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 313 if (e2->gpe_deleted || e2->gpe_internal) 314 continue; 315 if (e1->gpe_start >= e2->gpe_start && 316 e1->gpe_start <= e2->gpe_end) { 317 DPRINTF("partition %d has start offset inside " 318 "partition %d: start[%d] %jd >= start[%d] " 319 "%jd <= end[%d] %jd\n", 320 e1->gpe_index, e2->gpe_index, 321 e2->gpe_index, (intmax_t)e2->gpe_start, 322 e1->gpe_index, (intmax_t)e1->gpe_start, 323 e2->gpe_index, (intmax_t)e2->gpe_end); 324 failed++; 325 } 326 if (e1->gpe_end >= e2->gpe_start && 327 e1->gpe_end <= e2->gpe_end) { 328 DPRINTF("partition %d has end offset inside " 329 "partition %d: start[%d] %jd >= end[%d] " 330 "%jd <= end[%d] %jd\n", 331 e1->gpe_index, e2->gpe_index, 332 e2->gpe_index, (intmax_t)e2->gpe_start, 333 e1->gpe_index, (intmax_t)e1->gpe_end, 334 e2->gpe_index, (intmax_t)e2->gpe_end); 335 failed++; 336 } 337 if (e1->gpe_start < e2->gpe_start && 338 e1->gpe_end > e2->gpe_end) { 339 DPRINTF("partition %d contains partition %d: " 340 "start[%d] %jd > start[%d] %jd, end[%d] " 341 "%jd < end[%d] %jd\n", 342 e1->gpe_index, e2->gpe_index, 343 e1->gpe_index, (intmax_t)e1->gpe_start, 344 e2->gpe_index, (intmax_t)e2->gpe_start, 345 e2->gpe_index, (intmax_t)e2->gpe_end, 346 e1->gpe_index, (intmax_t)e1->gpe_end); 347 failed++; 348 } 349 } 350 } 351 if (failed != 0) { 352 printf("GEOM_PART: integrity check failed (%s, %s)\n", 353 pp->name, table->gpt_scheme->name); 354 if (check_integrity != 0) 355 return (EINVAL); 356 table->gpt_corrupt = 1; 357 } 358 return (0); 359 } 360 #undef DPRINTF 361 362 struct g_part_entry * 363 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 364 quad_t end) 365 { 366 struct g_part_entry *entry, *last; 367 368 last = NULL; 369 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 370 if (entry->gpe_index == index) 371 break; 372 if (entry->gpe_index > index) { 373 entry = NULL; 374 break; 375 } 376 last = entry; 377 } 378 if (entry == NULL) { 379 entry = g_malloc(table->gpt_scheme->gps_entrysz, 380 M_WAITOK | M_ZERO); 381 entry->gpe_index = index; 382 if (last == NULL) 383 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 384 else 385 LIST_INSERT_AFTER(last, entry, gpe_entry); 386 } else 387 entry->gpe_offset = 0; 388 entry->gpe_start = start; 389 entry->gpe_end = end; 390 return (entry); 391 } 392 393 static void 394 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 395 struct g_part_entry *entry) 396 { 397 struct g_consumer *cp; 398 struct g_provider *pp; 399 struct sbuf *sb; 400 off_t offset; 401 402 cp = LIST_FIRST(&gp->consumer); 403 pp = cp->provider; 404 405 offset = entry->gpe_start * pp->sectorsize; 406 if (entry->gpe_offset < offset) 407 entry->gpe_offset = offset; 408 409 if (entry->gpe_pp == NULL) { 410 sb = sbuf_new_auto(); 411 G_PART_FULLNAME(table, entry, sb, gp->name); 412 sbuf_finish(sb); 413 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 414 sbuf_delete(sb); 415 entry->gpe_pp->private = entry; /* Close the circle. */ 416 } 417 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 418 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 419 pp->sectorsize; 420 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 421 entry->gpe_pp->sectorsize = pp->sectorsize; 422 entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE; 423 entry->gpe_pp->stripesize = pp->stripesize; 424 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 425 if (pp->stripesize > 0) 426 entry->gpe_pp->stripeoffset %= pp->stripesize; 427 g_error_provider(entry->gpe_pp, 0); 428 } 429 430 static struct g_geom* 431 g_part_find_geom(const char *name) 432 { 433 struct g_geom *gp; 434 LIST_FOREACH(gp, &g_part_class.geom, geom) { 435 if (!strcmp(name, gp->name)) 436 break; 437 } 438 return (gp); 439 } 440 441 static int 442 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 443 { 444 struct g_geom *gp; 445 const char *gname; 446 447 gname = gctl_get_asciiparam(req, name); 448 if (gname == NULL) 449 return (ENOATTR); 450 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 451 gname += sizeof(_PATH_DEV) - 1; 452 gp = g_part_find_geom(gname); 453 if (gp == NULL) { 454 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 455 return (EINVAL); 456 } 457 if ((gp->flags & G_GEOM_WITHER) != 0) { 458 gctl_error(req, "%d %s", ENXIO, gname); 459 return (ENXIO); 460 } 461 *v = gp; 462 return (0); 463 } 464 465 static int 466 g_part_parm_provider(struct gctl_req *req, const char *name, 467 struct g_provider **v) 468 { 469 struct g_provider *pp; 470 const char *pname; 471 472 pname = gctl_get_asciiparam(req, name); 473 if (pname == NULL) 474 return (ENOATTR); 475 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 476 pname += sizeof(_PATH_DEV) - 1; 477 pp = g_provider_by_name(pname); 478 if (pp == NULL) { 479 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 480 return (EINVAL); 481 } 482 *v = pp; 483 return (0); 484 } 485 486 static int 487 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 488 { 489 const char *p; 490 char *x; 491 quad_t q; 492 493 p = gctl_get_asciiparam(req, name); 494 if (p == NULL) 495 return (ENOATTR); 496 q = strtoq(p, &x, 0); 497 if (*x != '\0' || q < 0) { 498 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 499 return (EINVAL); 500 } 501 *v = q; 502 return (0); 503 } 504 505 static int 506 g_part_parm_scheme(struct gctl_req *req, const char *name, 507 struct g_part_scheme **v) 508 { 509 struct g_part_scheme *s; 510 const char *p; 511 512 p = gctl_get_asciiparam(req, name); 513 if (p == NULL) 514 return (ENOATTR); 515 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 516 if (s == &g_part_null_scheme) 517 continue; 518 if (!strcasecmp(s->name, p)) 519 break; 520 } 521 if (s == NULL) { 522 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 523 return (EINVAL); 524 } 525 *v = s; 526 return (0); 527 } 528 529 static int 530 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 531 { 532 const char *p; 533 534 p = gctl_get_asciiparam(req, name); 535 if (p == NULL) 536 return (ENOATTR); 537 /* An empty label is always valid. */ 538 if (strcmp(name, "label") != 0 && p[0] == '\0') { 539 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 540 return (EINVAL); 541 } 542 *v = p; 543 return (0); 544 } 545 546 static int 547 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 548 { 549 const intmax_t *p; 550 int size; 551 552 p = gctl_get_param(req, name, &size); 553 if (p == NULL) 554 return (ENOATTR); 555 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 556 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 557 return (EINVAL); 558 } 559 *v = (u_int)*p; 560 return (0); 561 } 562 563 static int 564 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 565 { 566 const uint32_t *p; 567 int size; 568 569 p = gctl_get_param(req, name, &size); 570 if (p == NULL) 571 return (ENOATTR); 572 if (size != sizeof(*p) || *p > INT_MAX) { 573 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 574 return (EINVAL); 575 } 576 *v = (u_int)*p; 577 return (0); 578 } 579 580 static int 581 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 582 unsigned int *s) 583 { 584 const void *p; 585 int size; 586 587 p = gctl_get_param(req, name, &size); 588 if (p == NULL) 589 return (ENOATTR); 590 *v = p; 591 *s = size; 592 return (0); 593 } 594 595 static int 596 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 597 { 598 struct g_part_scheme *iter, *scheme; 599 struct g_part_table *table; 600 int pri, probe; 601 602 table = gp->softc; 603 scheme = (table != NULL) ? table->gpt_scheme : NULL; 604 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 605 if (pri == 0) 606 goto done; 607 if (pri > 0) { /* error */ 608 scheme = NULL; 609 pri = INT_MIN; 610 } 611 612 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 613 if (iter == &g_part_null_scheme) 614 continue; 615 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 616 M_WAITOK); 617 table->gpt_gp = gp; 618 table->gpt_scheme = iter; 619 table->gpt_depth = depth; 620 probe = G_PART_PROBE(table, cp); 621 if (probe <= 0 && probe > pri) { 622 pri = probe; 623 scheme = iter; 624 if (gp->softc != NULL) 625 kobj_delete((kobj_t)gp->softc, M_GEOM); 626 gp->softc = table; 627 if (pri == 0) 628 goto done; 629 } else 630 kobj_delete((kobj_t)table, M_GEOM); 631 } 632 633 done: 634 return ((scheme == NULL) ? ENXIO : 0); 635 } 636 637 /* 638 * Control request functions. 639 */ 640 641 static int 642 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 643 { 644 struct g_geom *gp; 645 struct g_provider *pp; 646 struct g_part_entry *delent, *last, *entry; 647 struct g_part_table *table; 648 struct sbuf *sb; 649 quad_t end; 650 unsigned int index; 651 int error; 652 653 gp = gpp->gpp_geom; 654 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 655 g_topology_assert(); 656 657 pp = LIST_FIRST(&gp->consumer)->provider; 658 table = gp->softc; 659 end = gpp->gpp_start + gpp->gpp_size - 1; 660 661 if (gpp->gpp_start < table->gpt_first || 662 gpp->gpp_start > table->gpt_last) { 663 gctl_error(req, "%d start '%jd'", EINVAL, 664 (intmax_t)gpp->gpp_start); 665 return (EINVAL); 666 } 667 if (end < gpp->gpp_start || end > table->gpt_last) { 668 gctl_error(req, "%d size '%jd'", EINVAL, 669 (intmax_t)gpp->gpp_size); 670 return (EINVAL); 671 } 672 if (gpp->gpp_index > table->gpt_entries) { 673 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 674 return (EINVAL); 675 } 676 677 delent = last = NULL; 678 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 679 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 680 if (entry->gpe_deleted) { 681 if (entry->gpe_index == index) 682 delent = entry; 683 continue; 684 } 685 if (entry->gpe_index == index) 686 index = entry->gpe_index + 1; 687 if (entry->gpe_index < index) 688 last = entry; 689 if (entry->gpe_internal) 690 continue; 691 if (gpp->gpp_start >= entry->gpe_start && 692 gpp->gpp_start <= entry->gpe_end) { 693 gctl_error(req, "%d start '%jd'", ENOSPC, 694 (intmax_t)gpp->gpp_start); 695 return (ENOSPC); 696 } 697 if (end >= entry->gpe_start && end <= entry->gpe_end) { 698 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 699 return (ENOSPC); 700 } 701 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 702 gctl_error(req, "%d size '%jd'", ENOSPC, 703 (intmax_t)gpp->gpp_size); 704 return (ENOSPC); 705 } 706 } 707 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 708 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 709 return (EEXIST); 710 } 711 if (index > table->gpt_entries) { 712 gctl_error(req, "%d index '%d'", ENOSPC, index); 713 return (ENOSPC); 714 } 715 716 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 717 M_WAITOK | M_ZERO) : delent; 718 entry->gpe_index = index; 719 entry->gpe_start = gpp->gpp_start; 720 entry->gpe_end = end; 721 error = G_PART_ADD(table, entry, gpp); 722 if (error) { 723 gctl_error(req, "%d", error); 724 if (delent == NULL) 725 g_free(entry); 726 return (error); 727 } 728 if (delent == NULL) { 729 if (last == NULL) 730 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 731 else 732 LIST_INSERT_AFTER(last, entry, gpe_entry); 733 entry->gpe_created = 1; 734 } else { 735 entry->gpe_deleted = 0; 736 entry->gpe_modified = 1; 737 } 738 g_part_new_provider(gp, table, entry); 739 740 /* Provide feedback if so requested. */ 741 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 742 sb = sbuf_new_auto(); 743 G_PART_FULLNAME(table, entry, sb, gp->name); 744 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 745 sbuf_printf(sb, " added, but partition is not " 746 "aligned on %u bytes\n", pp->stripesize); 747 else 748 sbuf_cat(sb, " added\n"); 749 sbuf_finish(sb); 750 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 751 sbuf_delete(sb); 752 } 753 return (0); 754 } 755 756 static int 757 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 758 { 759 struct g_geom *gp; 760 struct g_part_table *table; 761 struct sbuf *sb; 762 int error, sz; 763 764 gp = gpp->gpp_geom; 765 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 766 g_topology_assert(); 767 768 table = gp->softc; 769 sz = table->gpt_scheme->gps_bootcodesz; 770 if (sz == 0) { 771 error = ENODEV; 772 goto fail; 773 } 774 if (gpp->gpp_codesize > sz) { 775 error = EFBIG; 776 goto fail; 777 } 778 779 error = G_PART_BOOTCODE(table, gpp); 780 if (error) 781 goto fail; 782 783 /* Provide feedback if so requested. */ 784 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 785 sb = sbuf_new_auto(); 786 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 787 sbuf_finish(sb); 788 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 789 sbuf_delete(sb); 790 } 791 return (0); 792 793 fail: 794 gctl_error(req, "%d", error); 795 return (error); 796 } 797 798 static int 799 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 800 { 801 struct g_consumer *cp; 802 struct g_geom *gp; 803 struct g_provider *pp; 804 struct g_part_entry *entry, *tmp; 805 struct g_part_table *table; 806 char *buf; 807 int error, i; 808 809 gp = gpp->gpp_geom; 810 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 811 g_topology_assert(); 812 813 table = gp->softc; 814 if (!table->gpt_opened) { 815 gctl_error(req, "%d", EPERM); 816 return (EPERM); 817 } 818 819 g_topology_unlock(); 820 821 cp = LIST_FIRST(&gp->consumer); 822 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 823 pp = cp->provider; 824 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 825 while (table->gpt_smhead != 0) { 826 i = ffs(table->gpt_smhead) - 1; 827 error = g_write_data(cp, i * pp->sectorsize, buf, 828 pp->sectorsize); 829 if (error) { 830 g_free(buf); 831 goto fail; 832 } 833 table->gpt_smhead &= ~(1 << i); 834 } 835 while (table->gpt_smtail != 0) { 836 i = ffs(table->gpt_smtail) - 1; 837 error = g_write_data(cp, pp->mediasize - (i + 1) * 838 pp->sectorsize, buf, pp->sectorsize); 839 if (error) { 840 g_free(buf); 841 goto fail; 842 } 843 table->gpt_smtail &= ~(1 << i); 844 } 845 g_free(buf); 846 } 847 848 if (table->gpt_scheme == &g_part_null_scheme) { 849 g_topology_lock(); 850 g_access(cp, -1, -1, -1); 851 g_part_wither(gp, ENXIO); 852 return (0); 853 } 854 855 error = G_PART_WRITE(table, cp); 856 if (error) 857 goto fail; 858 859 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 860 if (!entry->gpe_deleted) { 861 entry->gpe_created = 0; 862 entry->gpe_modified = 0; 863 continue; 864 } 865 LIST_REMOVE(entry, gpe_entry); 866 g_free(entry); 867 } 868 table->gpt_created = 0; 869 table->gpt_opened = 0; 870 871 g_topology_lock(); 872 g_access(cp, -1, -1, -1); 873 return (0); 874 875 fail: 876 g_topology_lock(); 877 gctl_error(req, "%d", error); 878 return (error); 879 } 880 881 static int 882 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 883 { 884 struct g_consumer *cp; 885 struct g_geom *gp; 886 struct g_provider *pp; 887 struct g_part_scheme *scheme; 888 struct g_part_table *null, *table; 889 struct sbuf *sb; 890 int attr, error; 891 892 pp = gpp->gpp_provider; 893 scheme = gpp->gpp_scheme; 894 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 895 g_topology_assert(); 896 897 /* Check that there isn't already a g_part geom on the provider. */ 898 gp = g_part_find_geom(pp->name); 899 if (gp != NULL) { 900 null = gp->softc; 901 if (null->gpt_scheme != &g_part_null_scheme) { 902 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 903 return (EEXIST); 904 } 905 } else 906 null = NULL; 907 908 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 909 (gpp->gpp_entries < scheme->gps_minent || 910 gpp->gpp_entries > scheme->gps_maxent)) { 911 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 912 return (EINVAL); 913 } 914 915 if (null == NULL) 916 gp = g_new_geomf(&g_part_class, "%s", pp->name); 917 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 918 M_WAITOK); 919 table = gp->softc; 920 table->gpt_gp = gp; 921 table->gpt_scheme = gpp->gpp_scheme; 922 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 923 gpp->gpp_entries : scheme->gps_minent; 924 LIST_INIT(&table->gpt_entry); 925 if (null == NULL) { 926 cp = g_new_consumer(gp); 927 error = g_attach(cp, pp); 928 if (error == 0) 929 error = g_access(cp, 1, 1, 1); 930 if (error != 0) { 931 g_part_wither(gp, error); 932 gctl_error(req, "%d geom '%s'", error, pp->name); 933 return (error); 934 } 935 table->gpt_opened = 1; 936 } else { 937 cp = LIST_FIRST(&gp->consumer); 938 table->gpt_opened = null->gpt_opened; 939 table->gpt_smhead = null->gpt_smhead; 940 table->gpt_smtail = null->gpt_smtail; 941 } 942 943 g_topology_unlock(); 944 945 /* Make sure the provider has media. */ 946 if (pp->mediasize == 0 || pp->sectorsize == 0) { 947 error = ENODEV; 948 goto fail; 949 } 950 951 /* Make sure we can nest and if so, determine our depth. */ 952 error = g_getattr("PART::isleaf", cp, &attr); 953 if (!error && attr) { 954 error = ENODEV; 955 goto fail; 956 } 957 error = g_getattr("PART::depth", cp, &attr); 958 table->gpt_depth = (!error) ? attr + 1 : 0; 959 960 /* 961 * Synthesize a disk geometry. Some partitioning schemes 962 * depend on it and since some file systems need it even 963 * when the partitition scheme doesn't, we do it here in 964 * scheme-independent code. 965 */ 966 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 967 968 error = G_PART_CREATE(table, gpp); 969 if (error) 970 goto fail; 971 972 g_topology_lock(); 973 974 table->gpt_created = 1; 975 if (null != NULL) 976 kobj_delete((kobj_t)null, M_GEOM); 977 978 /* 979 * Support automatic commit by filling in the gpp_geom 980 * parameter. 981 */ 982 gpp->gpp_parms |= G_PART_PARM_GEOM; 983 gpp->gpp_geom = gp; 984 985 /* Provide feedback if so requested. */ 986 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 987 sb = sbuf_new_auto(); 988 sbuf_printf(sb, "%s created\n", gp->name); 989 sbuf_finish(sb); 990 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 991 sbuf_delete(sb); 992 } 993 return (0); 994 995 fail: 996 g_topology_lock(); 997 if (null == NULL) { 998 g_access(cp, -1, -1, -1); 999 g_part_wither(gp, error); 1000 } else { 1001 kobj_delete((kobj_t)gp->softc, M_GEOM); 1002 gp->softc = null; 1003 } 1004 gctl_error(req, "%d provider", error); 1005 return (error); 1006 } 1007 1008 static int 1009 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1010 { 1011 struct g_geom *gp; 1012 struct g_provider *pp; 1013 struct g_part_entry *entry; 1014 struct g_part_table *table; 1015 struct sbuf *sb; 1016 1017 gp = gpp->gpp_geom; 1018 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1019 g_topology_assert(); 1020 1021 table = gp->softc; 1022 1023 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1024 if (entry->gpe_deleted || entry->gpe_internal) 1025 continue; 1026 if (entry->gpe_index == gpp->gpp_index) 1027 break; 1028 } 1029 if (entry == NULL) { 1030 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1031 return (ENOENT); 1032 } 1033 1034 pp = entry->gpe_pp; 1035 if (pp != NULL) { 1036 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1037 gctl_error(req, "%d", EBUSY); 1038 return (EBUSY); 1039 } 1040 1041 pp->private = NULL; 1042 entry->gpe_pp = NULL; 1043 } 1044 1045 if (pp != NULL) 1046 g_wither_provider(pp, ENXIO); 1047 1048 /* Provide feedback if so requested. */ 1049 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1050 sb = sbuf_new_auto(); 1051 G_PART_FULLNAME(table, entry, sb, gp->name); 1052 sbuf_cat(sb, " deleted\n"); 1053 sbuf_finish(sb); 1054 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1055 sbuf_delete(sb); 1056 } 1057 1058 if (entry->gpe_created) { 1059 LIST_REMOVE(entry, gpe_entry); 1060 g_free(entry); 1061 } else { 1062 entry->gpe_modified = 0; 1063 entry->gpe_deleted = 1; 1064 } 1065 return (0); 1066 } 1067 1068 static int 1069 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1070 { 1071 struct g_consumer *cp; 1072 struct g_geom *gp; 1073 struct g_provider *pp; 1074 struct g_part_entry *entry, *tmp; 1075 struct g_part_table *null, *table; 1076 struct sbuf *sb; 1077 int error; 1078 1079 gp = gpp->gpp_geom; 1080 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1081 g_topology_assert(); 1082 1083 table = gp->softc; 1084 /* Check for busy providers. */ 1085 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1086 if (entry->gpe_deleted || entry->gpe_internal) 1087 continue; 1088 if (gpp->gpp_force) { 1089 pp = entry->gpe_pp; 1090 if (pp == NULL) 1091 continue; 1092 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1093 continue; 1094 } 1095 gctl_error(req, "%d", EBUSY); 1096 return (EBUSY); 1097 } 1098 1099 if (gpp->gpp_force) { 1100 /* Destroy all providers. */ 1101 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1102 pp = entry->gpe_pp; 1103 if (pp != NULL) { 1104 pp->private = NULL; 1105 g_wither_provider(pp, ENXIO); 1106 } 1107 LIST_REMOVE(entry, gpe_entry); 1108 g_free(entry); 1109 } 1110 } 1111 1112 error = G_PART_DESTROY(table, gpp); 1113 if (error) { 1114 gctl_error(req, "%d", error); 1115 return (error); 1116 } 1117 1118 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1119 M_WAITOK); 1120 null = gp->softc; 1121 null->gpt_gp = gp; 1122 null->gpt_scheme = &g_part_null_scheme; 1123 LIST_INIT(&null->gpt_entry); 1124 1125 cp = LIST_FIRST(&gp->consumer); 1126 pp = cp->provider; 1127 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1128 1129 null->gpt_depth = table->gpt_depth; 1130 null->gpt_opened = table->gpt_opened; 1131 null->gpt_smhead = table->gpt_smhead; 1132 null->gpt_smtail = table->gpt_smtail; 1133 1134 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1135 LIST_REMOVE(entry, gpe_entry); 1136 g_free(entry); 1137 } 1138 kobj_delete((kobj_t)table, M_GEOM); 1139 1140 /* Provide feedback if so requested. */ 1141 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1142 sb = sbuf_new_auto(); 1143 sbuf_printf(sb, "%s destroyed\n", gp->name); 1144 sbuf_finish(sb); 1145 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1146 sbuf_delete(sb); 1147 } 1148 return (0); 1149 } 1150 1151 static int 1152 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1153 { 1154 struct g_geom *gp; 1155 struct g_part_entry *entry; 1156 struct g_part_table *table; 1157 struct sbuf *sb; 1158 int error; 1159 1160 gp = gpp->gpp_geom; 1161 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1162 g_topology_assert(); 1163 1164 table = gp->softc; 1165 1166 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1167 if (entry->gpe_deleted || entry->gpe_internal) 1168 continue; 1169 if (entry->gpe_index == gpp->gpp_index) 1170 break; 1171 } 1172 if (entry == NULL) { 1173 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1174 return (ENOENT); 1175 } 1176 1177 error = G_PART_MODIFY(table, entry, gpp); 1178 if (error) { 1179 gctl_error(req, "%d", error); 1180 return (error); 1181 } 1182 1183 if (!entry->gpe_created) 1184 entry->gpe_modified = 1; 1185 1186 /* Provide feedback if so requested. */ 1187 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1188 sb = sbuf_new_auto(); 1189 G_PART_FULLNAME(table, entry, sb, gp->name); 1190 sbuf_cat(sb, " modified\n"); 1191 sbuf_finish(sb); 1192 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1193 sbuf_delete(sb); 1194 } 1195 return (0); 1196 } 1197 1198 static int 1199 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1200 { 1201 gctl_error(req, "%d verb 'move'", ENOSYS); 1202 return (ENOSYS); 1203 } 1204 1205 static int 1206 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1207 { 1208 struct g_part_table *table; 1209 struct g_geom *gp; 1210 struct sbuf *sb; 1211 int error, recovered; 1212 1213 gp = gpp->gpp_geom; 1214 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1215 g_topology_assert(); 1216 table = gp->softc; 1217 error = recovered = 0; 1218 1219 if (table->gpt_corrupt) { 1220 error = G_PART_RECOVER(table); 1221 if (error == 0) 1222 error = g_part_check_integrity(table, 1223 LIST_FIRST(&gp->consumer)); 1224 if (error) { 1225 gctl_error(req, "%d recovering '%s' failed", 1226 error, gp->name); 1227 return (error); 1228 } 1229 recovered = 1; 1230 } 1231 /* Provide feedback if so requested. */ 1232 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1233 sb = sbuf_new_auto(); 1234 if (recovered) 1235 sbuf_printf(sb, "%s recovered\n", gp->name); 1236 else 1237 sbuf_printf(sb, "%s recovering is not needed\n", 1238 gp->name); 1239 sbuf_finish(sb); 1240 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1241 sbuf_delete(sb); 1242 } 1243 return (0); 1244 } 1245 1246 static int 1247 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1248 { 1249 struct g_geom *gp; 1250 struct g_provider *pp; 1251 struct g_part_entry *pe, *entry; 1252 struct g_part_table *table; 1253 struct sbuf *sb; 1254 quad_t end; 1255 int error; 1256 1257 gp = gpp->gpp_geom; 1258 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1259 g_topology_assert(); 1260 table = gp->softc; 1261 1262 /* check gpp_index */ 1263 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1264 if (entry->gpe_deleted || entry->gpe_internal) 1265 continue; 1266 if (entry->gpe_index == gpp->gpp_index) 1267 break; 1268 } 1269 if (entry == NULL) { 1270 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1271 return (ENOENT); 1272 } 1273 1274 /* check gpp_size */ 1275 end = entry->gpe_start + gpp->gpp_size - 1; 1276 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1277 gctl_error(req, "%d size '%jd'", EINVAL, 1278 (intmax_t)gpp->gpp_size); 1279 return (EINVAL); 1280 } 1281 1282 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1283 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1284 continue; 1285 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1286 gctl_error(req, "%d end '%jd'", ENOSPC, 1287 (intmax_t)end); 1288 return (ENOSPC); 1289 } 1290 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1291 gctl_error(req, "%d size '%jd'", ENOSPC, 1292 (intmax_t)gpp->gpp_size); 1293 return (ENOSPC); 1294 } 1295 } 1296 1297 pp = entry->gpe_pp; 1298 if ((g_debugflags & 16) == 0 && 1299 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1300 gctl_error(req, "%d", EBUSY); 1301 return (EBUSY); 1302 } 1303 1304 error = G_PART_RESIZE(table, entry, gpp); 1305 if (error) { 1306 gctl_error(req, "%d", error); 1307 return (error); 1308 } 1309 1310 if (!entry->gpe_created) 1311 entry->gpe_modified = 1; 1312 1313 /* update mediasize of changed provider */ 1314 pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1315 pp->sectorsize; 1316 1317 /* Provide feedback if so requested. */ 1318 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1319 sb = sbuf_new_auto(); 1320 G_PART_FULLNAME(table, entry, sb, gp->name); 1321 sbuf_cat(sb, " resized\n"); 1322 sbuf_finish(sb); 1323 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1324 sbuf_delete(sb); 1325 } 1326 return (0); 1327 } 1328 1329 static int 1330 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1331 unsigned int set) 1332 { 1333 struct g_geom *gp; 1334 struct g_part_entry *entry; 1335 struct g_part_table *table; 1336 struct sbuf *sb; 1337 int error; 1338 1339 gp = gpp->gpp_geom; 1340 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1341 g_topology_assert(); 1342 1343 table = gp->softc; 1344 1345 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1346 if (entry->gpe_deleted || entry->gpe_internal) 1347 continue; 1348 if (entry->gpe_index == gpp->gpp_index) 1349 break; 1350 } 1351 if (entry == NULL) { 1352 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1353 return (ENOENT); 1354 } 1355 1356 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1357 if (error) { 1358 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1359 return (error); 1360 } 1361 1362 /* Provide feedback if so requested. */ 1363 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1364 sb = sbuf_new_auto(); 1365 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1366 (set) ? "" : "un"); 1367 G_PART_FULLNAME(table, entry, sb, gp->name); 1368 sbuf_printf(sb, "\n"); 1369 sbuf_finish(sb); 1370 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1371 sbuf_delete(sb); 1372 } 1373 return (0); 1374 } 1375 1376 static int 1377 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1378 { 1379 struct g_consumer *cp; 1380 struct g_provider *pp; 1381 struct g_geom *gp; 1382 struct g_part_entry *entry, *tmp; 1383 struct g_part_table *table; 1384 int error, reprobe; 1385 1386 gp = gpp->gpp_geom; 1387 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1388 g_topology_assert(); 1389 1390 table = gp->softc; 1391 if (!table->gpt_opened) { 1392 gctl_error(req, "%d", EPERM); 1393 return (EPERM); 1394 } 1395 1396 cp = LIST_FIRST(&gp->consumer); 1397 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1398 entry->gpe_modified = 0; 1399 if (entry->gpe_created) { 1400 pp = entry->gpe_pp; 1401 if (pp != NULL) { 1402 pp->private = NULL; 1403 entry->gpe_pp = NULL; 1404 g_wither_provider(pp, ENXIO); 1405 } 1406 entry->gpe_deleted = 1; 1407 } 1408 if (entry->gpe_deleted) { 1409 LIST_REMOVE(entry, gpe_entry); 1410 g_free(entry); 1411 } 1412 } 1413 1414 g_topology_unlock(); 1415 1416 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1417 table->gpt_created) ? 1 : 0; 1418 1419 if (reprobe) { 1420 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1421 if (entry->gpe_internal) 1422 continue; 1423 error = EBUSY; 1424 goto fail; 1425 } 1426 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1427 LIST_REMOVE(entry, gpe_entry); 1428 g_free(entry); 1429 } 1430 error = g_part_probe(gp, cp, table->gpt_depth); 1431 if (error) { 1432 g_topology_lock(); 1433 g_access(cp, -1, -1, -1); 1434 g_part_wither(gp, error); 1435 return (0); 1436 } 1437 table = gp->softc; 1438 1439 /* 1440 * Synthesize a disk geometry. Some partitioning schemes 1441 * depend on it and since some file systems need it even 1442 * when the partitition scheme doesn't, we do it here in 1443 * scheme-independent code. 1444 */ 1445 pp = cp->provider; 1446 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1447 } 1448 1449 error = G_PART_READ(table, cp); 1450 if (error) 1451 goto fail; 1452 error = g_part_check_integrity(table, cp); 1453 if (error) 1454 goto fail; 1455 1456 g_topology_lock(); 1457 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1458 if (!entry->gpe_internal) 1459 g_part_new_provider(gp, table, entry); 1460 } 1461 1462 table->gpt_opened = 0; 1463 g_access(cp, -1, -1, -1); 1464 return (0); 1465 1466 fail: 1467 g_topology_lock(); 1468 gctl_error(req, "%d", error); 1469 return (error); 1470 } 1471 1472 static void 1473 g_part_wither(struct g_geom *gp, int error) 1474 { 1475 struct g_part_entry *entry; 1476 struct g_part_table *table; 1477 1478 table = gp->softc; 1479 if (table != NULL) { 1480 G_PART_DESTROY(table, NULL); 1481 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1482 LIST_REMOVE(entry, gpe_entry); 1483 g_free(entry); 1484 } 1485 if (gp->softc != NULL) { 1486 kobj_delete((kobj_t)gp->softc, M_GEOM); 1487 gp->softc = NULL; 1488 } 1489 } 1490 g_wither_geom(gp, error); 1491 } 1492 1493 /* 1494 * Class methods. 1495 */ 1496 1497 static void 1498 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1499 { 1500 struct g_part_parms gpp; 1501 struct g_part_table *table; 1502 struct gctl_req_arg *ap; 1503 enum g_part_ctl ctlreq; 1504 unsigned int i, mparms, oparms, parm; 1505 int auto_commit, close_on_error; 1506 int error, modifies; 1507 1508 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1509 g_topology_assert(); 1510 1511 ctlreq = G_PART_CTL_NONE; 1512 modifies = 1; 1513 mparms = 0; 1514 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1515 switch (*verb) { 1516 case 'a': 1517 if (!strcmp(verb, "add")) { 1518 ctlreq = G_PART_CTL_ADD; 1519 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1520 G_PART_PARM_START | G_PART_PARM_TYPE; 1521 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1522 } 1523 break; 1524 case 'b': 1525 if (!strcmp(verb, "bootcode")) { 1526 ctlreq = G_PART_CTL_BOOTCODE; 1527 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1528 } 1529 break; 1530 case 'c': 1531 if (!strcmp(verb, "commit")) { 1532 ctlreq = G_PART_CTL_COMMIT; 1533 mparms |= G_PART_PARM_GEOM; 1534 modifies = 0; 1535 } else if (!strcmp(verb, "create")) { 1536 ctlreq = G_PART_CTL_CREATE; 1537 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1538 oparms |= G_PART_PARM_ENTRIES; 1539 } 1540 break; 1541 case 'd': 1542 if (!strcmp(verb, "delete")) { 1543 ctlreq = G_PART_CTL_DELETE; 1544 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1545 } else if (!strcmp(verb, "destroy")) { 1546 ctlreq = G_PART_CTL_DESTROY; 1547 mparms |= G_PART_PARM_GEOM; 1548 oparms |= G_PART_PARM_FORCE; 1549 } 1550 break; 1551 case 'm': 1552 if (!strcmp(verb, "modify")) { 1553 ctlreq = G_PART_CTL_MODIFY; 1554 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1555 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1556 } else if (!strcmp(verb, "move")) { 1557 ctlreq = G_PART_CTL_MOVE; 1558 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1559 } 1560 break; 1561 case 'r': 1562 if (!strcmp(verb, "recover")) { 1563 ctlreq = G_PART_CTL_RECOVER; 1564 mparms |= G_PART_PARM_GEOM; 1565 } else if (!strcmp(verb, "resize")) { 1566 ctlreq = G_PART_CTL_RESIZE; 1567 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1568 G_PART_PARM_SIZE; 1569 } 1570 break; 1571 case 's': 1572 if (!strcmp(verb, "set")) { 1573 ctlreq = G_PART_CTL_SET; 1574 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1575 G_PART_PARM_INDEX; 1576 } 1577 break; 1578 case 'u': 1579 if (!strcmp(verb, "undo")) { 1580 ctlreq = G_PART_CTL_UNDO; 1581 mparms |= G_PART_PARM_GEOM; 1582 modifies = 0; 1583 } else if (!strcmp(verb, "unset")) { 1584 ctlreq = G_PART_CTL_UNSET; 1585 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1586 G_PART_PARM_INDEX; 1587 } 1588 break; 1589 } 1590 if (ctlreq == G_PART_CTL_NONE) { 1591 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1592 return; 1593 } 1594 1595 bzero(&gpp, sizeof(gpp)); 1596 for (i = 0; i < req->narg; i++) { 1597 ap = &req->arg[i]; 1598 parm = 0; 1599 switch (ap->name[0]) { 1600 case 'a': 1601 if (!strcmp(ap->name, "arg0")) { 1602 parm = mparms & 1603 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1604 } 1605 if (!strcmp(ap->name, "attrib")) 1606 parm = G_PART_PARM_ATTRIB; 1607 break; 1608 case 'b': 1609 if (!strcmp(ap->name, "bootcode")) 1610 parm = G_PART_PARM_BOOTCODE; 1611 break; 1612 case 'c': 1613 if (!strcmp(ap->name, "class")) 1614 continue; 1615 break; 1616 case 'e': 1617 if (!strcmp(ap->name, "entries")) 1618 parm = G_PART_PARM_ENTRIES; 1619 break; 1620 case 'f': 1621 if (!strcmp(ap->name, "flags")) 1622 parm = G_PART_PARM_FLAGS; 1623 else if (!strcmp(ap->name, "force")) 1624 parm = G_PART_PARM_FORCE; 1625 break; 1626 case 'i': 1627 if (!strcmp(ap->name, "index")) 1628 parm = G_PART_PARM_INDEX; 1629 break; 1630 case 'l': 1631 if (!strcmp(ap->name, "label")) 1632 parm = G_PART_PARM_LABEL; 1633 break; 1634 case 'o': 1635 if (!strcmp(ap->name, "output")) 1636 parm = G_PART_PARM_OUTPUT; 1637 break; 1638 case 's': 1639 if (!strcmp(ap->name, "scheme")) 1640 parm = G_PART_PARM_SCHEME; 1641 else if (!strcmp(ap->name, "size")) 1642 parm = G_PART_PARM_SIZE; 1643 else if (!strcmp(ap->name, "start")) 1644 parm = G_PART_PARM_START; 1645 break; 1646 case 't': 1647 if (!strcmp(ap->name, "type")) 1648 parm = G_PART_PARM_TYPE; 1649 break; 1650 case 'v': 1651 if (!strcmp(ap->name, "verb")) 1652 continue; 1653 else if (!strcmp(ap->name, "version")) 1654 parm = G_PART_PARM_VERSION; 1655 break; 1656 } 1657 if ((parm & (mparms | oparms)) == 0) { 1658 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1659 return; 1660 } 1661 switch (parm) { 1662 case G_PART_PARM_ATTRIB: 1663 error = g_part_parm_str(req, ap->name, 1664 &gpp.gpp_attrib); 1665 break; 1666 case G_PART_PARM_BOOTCODE: 1667 error = g_part_parm_bootcode(req, ap->name, 1668 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1669 break; 1670 case G_PART_PARM_ENTRIES: 1671 error = g_part_parm_intmax(req, ap->name, 1672 &gpp.gpp_entries); 1673 break; 1674 case G_PART_PARM_FLAGS: 1675 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1676 break; 1677 case G_PART_PARM_FORCE: 1678 error = g_part_parm_uint32(req, ap->name, 1679 &gpp.gpp_force); 1680 break; 1681 case G_PART_PARM_GEOM: 1682 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1683 break; 1684 case G_PART_PARM_INDEX: 1685 error = g_part_parm_intmax(req, ap->name, 1686 &gpp.gpp_index); 1687 break; 1688 case G_PART_PARM_LABEL: 1689 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1690 break; 1691 case G_PART_PARM_OUTPUT: 1692 error = 0; /* Write-only parameter */ 1693 break; 1694 case G_PART_PARM_PROVIDER: 1695 error = g_part_parm_provider(req, ap->name, 1696 &gpp.gpp_provider); 1697 break; 1698 case G_PART_PARM_SCHEME: 1699 error = g_part_parm_scheme(req, ap->name, 1700 &gpp.gpp_scheme); 1701 break; 1702 case G_PART_PARM_SIZE: 1703 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1704 break; 1705 case G_PART_PARM_START: 1706 error = g_part_parm_quad(req, ap->name, 1707 &gpp.gpp_start); 1708 break; 1709 case G_PART_PARM_TYPE: 1710 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1711 break; 1712 case G_PART_PARM_VERSION: 1713 error = g_part_parm_uint32(req, ap->name, 1714 &gpp.gpp_version); 1715 break; 1716 default: 1717 error = EDOOFUS; 1718 gctl_error(req, "%d %s", error, ap->name); 1719 break; 1720 } 1721 if (error != 0) { 1722 if (error == ENOATTR) { 1723 gctl_error(req, "%d param '%s'", error, 1724 ap->name); 1725 } 1726 return; 1727 } 1728 gpp.gpp_parms |= parm; 1729 } 1730 if ((gpp.gpp_parms & mparms) != mparms) { 1731 parm = mparms - (gpp.gpp_parms & mparms); 1732 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1733 return; 1734 } 1735 1736 /* Obtain permissions if possible/necessary. */ 1737 close_on_error = 0; 1738 table = NULL; 1739 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1740 table = gpp.gpp_geom->softc; 1741 if (table != NULL && table->gpt_corrupt && 1742 ctlreq != G_PART_CTL_DESTROY && 1743 ctlreq != G_PART_CTL_RECOVER) { 1744 gctl_error(req, "%d table '%s' is corrupt", 1745 EPERM, gpp.gpp_geom->name); 1746 return; 1747 } 1748 if (table != NULL && !table->gpt_opened) { 1749 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1750 1, 1, 1); 1751 if (error) { 1752 gctl_error(req, "%d geom '%s'", error, 1753 gpp.gpp_geom->name); 1754 return; 1755 } 1756 table->gpt_opened = 1; 1757 close_on_error = 1; 1758 } 1759 } 1760 1761 /* Allow the scheme to check or modify the parameters. */ 1762 if (table != NULL) { 1763 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1764 if (error) { 1765 gctl_error(req, "%d pre-check failed", error); 1766 goto out; 1767 } 1768 } else 1769 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1770 1771 switch (ctlreq) { 1772 case G_PART_CTL_NONE: 1773 panic("%s", __func__); 1774 case G_PART_CTL_ADD: 1775 error = g_part_ctl_add(req, &gpp); 1776 break; 1777 case G_PART_CTL_BOOTCODE: 1778 error = g_part_ctl_bootcode(req, &gpp); 1779 break; 1780 case G_PART_CTL_COMMIT: 1781 error = g_part_ctl_commit(req, &gpp); 1782 break; 1783 case G_PART_CTL_CREATE: 1784 error = g_part_ctl_create(req, &gpp); 1785 break; 1786 case G_PART_CTL_DELETE: 1787 error = g_part_ctl_delete(req, &gpp); 1788 break; 1789 case G_PART_CTL_DESTROY: 1790 error = g_part_ctl_destroy(req, &gpp); 1791 break; 1792 case G_PART_CTL_MODIFY: 1793 error = g_part_ctl_modify(req, &gpp); 1794 break; 1795 case G_PART_CTL_MOVE: 1796 error = g_part_ctl_move(req, &gpp); 1797 break; 1798 case G_PART_CTL_RECOVER: 1799 error = g_part_ctl_recover(req, &gpp); 1800 break; 1801 case G_PART_CTL_RESIZE: 1802 error = g_part_ctl_resize(req, &gpp); 1803 break; 1804 case G_PART_CTL_SET: 1805 error = g_part_ctl_setunset(req, &gpp, 1); 1806 break; 1807 case G_PART_CTL_UNDO: 1808 error = g_part_ctl_undo(req, &gpp); 1809 break; 1810 case G_PART_CTL_UNSET: 1811 error = g_part_ctl_setunset(req, &gpp, 0); 1812 break; 1813 } 1814 1815 /* Implement automatic commit. */ 1816 if (!error) { 1817 auto_commit = (modifies && 1818 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1819 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1820 if (auto_commit) { 1821 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1822 __func__)); 1823 error = g_part_ctl_commit(req, &gpp); 1824 } 1825 } 1826 1827 out: 1828 if (error && close_on_error) { 1829 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1830 table->gpt_opened = 0; 1831 } 1832 } 1833 1834 static int 1835 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1836 struct g_geom *gp) 1837 { 1838 1839 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1840 g_topology_assert(); 1841 1842 g_part_wither(gp, EINVAL); 1843 return (0); 1844 } 1845 1846 static struct g_geom * 1847 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1848 { 1849 struct g_consumer *cp; 1850 struct g_geom *gp; 1851 struct g_part_entry *entry; 1852 struct g_part_table *table; 1853 struct root_hold_token *rht; 1854 int attr, depth; 1855 int error; 1856 1857 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1858 g_topology_assert(); 1859 1860 /* Skip providers that are already open for writing. */ 1861 if (pp->acw > 0) 1862 return (NULL); 1863 1864 /* 1865 * Create a GEOM with consumer and hook it up to the provider. 1866 * With that we become part of the topology. Optain read access 1867 * to the provider. 1868 */ 1869 gp = g_new_geomf(mp, "%s", pp->name); 1870 cp = g_new_consumer(gp); 1871 error = g_attach(cp, pp); 1872 if (error == 0) 1873 error = g_access(cp, 1, 0, 0); 1874 if (error != 0) { 1875 g_part_wither(gp, error); 1876 return (NULL); 1877 } 1878 1879 rht = root_mount_hold(mp->name); 1880 g_topology_unlock(); 1881 1882 /* 1883 * Short-circuit the whole probing galore when there's no 1884 * media present. 1885 */ 1886 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1887 error = ENODEV; 1888 goto fail; 1889 } 1890 1891 /* Make sure we can nest and if so, determine our depth. */ 1892 error = g_getattr("PART::isleaf", cp, &attr); 1893 if (!error && attr) { 1894 error = ENODEV; 1895 goto fail; 1896 } 1897 error = g_getattr("PART::depth", cp, &attr); 1898 depth = (!error) ? attr + 1 : 0; 1899 1900 error = g_part_probe(gp, cp, depth); 1901 if (error) 1902 goto fail; 1903 1904 table = gp->softc; 1905 1906 /* 1907 * Synthesize a disk geometry. Some partitioning schemes 1908 * depend on it and since some file systems need it even 1909 * when the partitition scheme doesn't, we do it here in 1910 * scheme-independent code. 1911 */ 1912 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1913 1914 error = G_PART_READ(table, cp); 1915 if (error) 1916 goto fail; 1917 error = g_part_check_integrity(table, cp); 1918 if (error) 1919 goto fail; 1920 1921 g_topology_lock(); 1922 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1923 if (!entry->gpe_internal) 1924 g_part_new_provider(gp, table, entry); 1925 } 1926 1927 root_mount_rel(rht); 1928 g_access(cp, -1, 0, 0); 1929 return (gp); 1930 1931 fail: 1932 g_topology_lock(); 1933 root_mount_rel(rht); 1934 g_access(cp, -1, 0, 0); 1935 g_part_wither(gp, error); 1936 return (NULL); 1937 } 1938 1939 /* 1940 * Geom methods. 1941 */ 1942 1943 static int 1944 g_part_access(struct g_provider *pp, int dr, int dw, int de) 1945 { 1946 struct g_consumer *cp; 1947 1948 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1949 dw, de)); 1950 1951 cp = LIST_FIRST(&pp->geom->consumer); 1952 1953 /* We always gain write-exclusive access. */ 1954 return (g_access(cp, dr, dw, dw + de)); 1955 } 1956 1957 static void 1958 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1959 struct g_consumer *cp, struct g_provider *pp) 1960 { 1961 char buf[64]; 1962 struct g_part_entry *entry; 1963 struct g_part_table *table; 1964 1965 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 1966 table = gp->softc; 1967 1968 if (indent == NULL) { 1969 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 1970 entry = pp->private; 1971 if (entry == NULL) 1972 return; 1973 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 1974 (uintmax_t)entry->gpe_offset, 1975 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1976 /* 1977 * libdisk compatibility quirk - the scheme dumps the 1978 * slicer name and partition type in a way that is 1979 * compatible with libdisk. When libdisk is not used 1980 * anymore, this should go away. 1981 */ 1982 G_PART_DUMPCONF(table, entry, sb, indent); 1983 } else if (cp != NULL) { /* Consumer configuration. */ 1984 KASSERT(pp == NULL, ("%s", __func__)); 1985 /* none */ 1986 } else if (pp != NULL) { /* Provider configuration. */ 1987 entry = pp->private; 1988 if (entry == NULL) 1989 return; 1990 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 1991 (uintmax_t)entry->gpe_start); 1992 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 1993 (uintmax_t)entry->gpe_end); 1994 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 1995 entry->gpe_index); 1996 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 1997 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1998 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 1999 (uintmax_t)entry->gpe_offset); 2000 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2001 (uintmax_t)pp->mediasize); 2002 G_PART_DUMPCONF(table, entry, sb, indent); 2003 } else { /* Geom configuration. */ 2004 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2005 table->gpt_scheme->name); 2006 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2007 table->gpt_entries); 2008 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2009 (uintmax_t)table->gpt_first); 2010 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2011 (uintmax_t)table->gpt_last); 2012 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2013 table->gpt_sectors); 2014 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2015 table->gpt_heads); 2016 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2017 table->gpt_corrupt ? "CORRUPT": "OK"); 2018 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2019 table->gpt_opened ? "true": "false"); 2020 G_PART_DUMPCONF(table, NULL, sb, indent); 2021 } 2022 } 2023 2024 static void 2025 g_part_orphan(struct g_consumer *cp) 2026 { 2027 struct g_provider *pp; 2028 struct g_part_table *table; 2029 2030 pp = cp->provider; 2031 KASSERT(pp != NULL, ("%s", __func__)); 2032 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2033 g_topology_assert(); 2034 2035 KASSERT(pp->error != 0, ("%s", __func__)); 2036 table = cp->geom->softc; 2037 if (table != NULL && table->gpt_opened) 2038 g_access(cp, -1, -1, -1); 2039 g_part_wither(cp->geom, pp->error); 2040 } 2041 2042 static void 2043 g_part_spoiled(struct g_consumer *cp) 2044 { 2045 2046 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2047 g_topology_assert(); 2048 2049 g_part_wither(cp->geom, ENXIO); 2050 } 2051 2052 static void 2053 g_part_start(struct bio *bp) 2054 { 2055 struct bio *bp2; 2056 struct g_consumer *cp; 2057 struct g_geom *gp; 2058 struct g_part_entry *entry; 2059 struct g_part_table *table; 2060 struct g_kerneldump *gkd; 2061 struct g_provider *pp; 2062 char buf[64]; 2063 2064 pp = bp->bio_to; 2065 gp = pp->geom; 2066 table = gp->softc; 2067 cp = LIST_FIRST(&gp->consumer); 2068 2069 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2070 pp->name)); 2071 2072 entry = pp->private; 2073 if (entry == NULL) { 2074 g_io_deliver(bp, ENXIO); 2075 return; 2076 } 2077 2078 switch(bp->bio_cmd) { 2079 case BIO_DELETE: 2080 case BIO_READ: 2081 case BIO_WRITE: 2082 if (bp->bio_offset >= pp->mediasize) { 2083 g_io_deliver(bp, EIO); 2084 return; 2085 } 2086 bp2 = g_clone_bio(bp); 2087 if (bp2 == NULL) { 2088 g_io_deliver(bp, ENOMEM); 2089 return; 2090 } 2091 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2092 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2093 bp2->bio_done = g_std_done; 2094 bp2->bio_offset += entry->gpe_offset; 2095 g_io_request(bp2, cp); 2096 return; 2097 case BIO_FLUSH: 2098 break; 2099 case BIO_GETATTR: 2100 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2101 return; 2102 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2103 return; 2104 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2105 return; 2106 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2107 return; 2108 if (g_handleattr_str(bp, "PART::scheme", 2109 table->gpt_scheme->name)) 2110 return; 2111 if (g_handleattr_str(bp, "PART::type", 2112 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2113 return; 2114 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2115 /* 2116 * Check that the partition is suitable for kernel 2117 * dumps. Typically only swap partitions should be 2118 * used. If the request comes from the nested scheme 2119 * we allow dumping there as well. 2120 */ 2121 if ((bp->bio_from == NULL || 2122 bp->bio_from->geom->class != &g_part_class) && 2123 G_PART_DUMPTO(table, entry) == 0) { 2124 g_io_deliver(bp, ENODEV); 2125 printf("GEOM_PART: Partition '%s' not suitable" 2126 " for kernel dumps (wrong type?)\n", 2127 pp->name); 2128 return; 2129 } 2130 gkd = (struct g_kerneldump *)bp->bio_data; 2131 if (gkd->offset >= pp->mediasize) { 2132 g_io_deliver(bp, EIO); 2133 return; 2134 } 2135 if (gkd->offset + gkd->length > pp->mediasize) 2136 gkd->length = pp->mediasize - gkd->offset; 2137 gkd->offset += entry->gpe_offset; 2138 } 2139 break; 2140 default: 2141 g_io_deliver(bp, EOPNOTSUPP); 2142 return; 2143 } 2144 2145 bp2 = g_clone_bio(bp); 2146 if (bp2 == NULL) { 2147 g_io_deliver(bp, ENOMEM); 2148 return; 2149 } 2150 bp2->bio_done = g_std_done; 2151 g_io_request(bp2, cp); 2152 } 2153 2154 static void 2155 g_part_init(struct g_class *mp) 2156 { 2157 2158 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2159 } 2160 2161 static void 2162 g_part_fini(struct g_class *mp) 2163 { 2164 2165 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2166 } 2167 2168 static void 2169 g_part_unload_event(void *arg, int flag) 2170 { 2171 struct g_consumer *cp; 2172 struct g_geom *gp; 2173 struct g_provider *pp; 2174 struct g_part_scheme *scheme; 2175 struct g_part_table *table; 2176 uintptr_t *xchg; 2177 int acc, error; 2178 2179 if (flag == EV_CANCEL) 2180 return; 2181 2182 xchg = arg; 2183 error = 0; 2184 scheme = (void *)(*xchg); 2185 2186 g_topology_assert(); 2187 2188 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2189 table = gp->softc; 2190 if (table->gpt_scheme != scheme) 2191 continue; 2192 2193 acc = 0; 2194 LIST_FOREACH(pp, &gp->provider, provider) 2195 acc += pp->acr + pp->acw + pp->ace; 2196 LIST_FOREACH(cp, &gp->consumer, consumer) 2197 acc += cp->acr + cp->acw + cp->ace; 2198 2199 if (!acc) 2200 g_part_wither(gp, ENOSYS); 2201 else 2202 error = EBUSY; 2203 } 2204 2205 if (!error) 2206 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2207 2208 *xchg = error; 2209 } 2210 2211 int 2212 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2213 { 2214 struct g_part_scheme *iter; 2215 uintptr_t arg; 2216 int error; 2217 2218 error = 0; 2219 switch (type) { 2220 case MOD_LOAD: 2221 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2222 if (scheme == iter) { 2223 printf("GEOM_PART: scheme %s is already " 2224 "registered!\n", scheme->name); 2225 break; 2226 } 2227 } 2228 if (iter == NULL) { 2229 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2230 scheme_list); 2231 g_retaste(&g_part_class); 2232 } 2233 break; 2234 case MOD_UNLOAD: 2235 arg = (uintptr_t)scheme; 2236 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2237 NULL); 2238 if (error == 0) 2239 error = arg; 2240 break; 2241 default: 2242 error = EOPNOTSUPP; 2243 break; 2244 } 2245 2246 return (error); 2247 } 2248