1 /*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/endian.h> 33 #include <sys/kernel.h> 34 #include <sys/kobj.h> 35 #include <sys/limits.h> 36 #include <sys/lock.h> 37 #include <sys/malloc.h> 38 #include <sys/mutex.h> 39 #include <sys/queue.h> 40 #include <sys/sbuf.h> 41 #include <sys/sysctl.h> 42 #include <sys/systm.h> 43 #include <sys/uuid.h> 44 #include <geom/geom.h> 45 #include <geom/geom_ctl.h> 46 #include <geom/geom_int.h> 47 #include <geom/part/g_part.h> 48 49 #include "g_part_if.h" 50 51 #ifndef _PATH_DEV 52 #define _PATH_DEV "/dev/" 53 #endif 54 55 static kobj_method_t g_part_null_methods[] = { 56 { 0, 0 } 57 }; 58 59 static struct g_part_scheme g_part_null_scheme = { 60 "(none)", 61 g_part_null_methods, 62 sizeof(struct g_part_table), 63 }; 64 65 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 66 TAILQ_HEAD_INITIALIZER(g_part_schemes); 67 68 struct g_part_alias_list { 69 const char *lexeme; 70 enum g_part_alias alias; 71 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 72 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 73 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 74 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 75 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 76 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 77 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 78 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 79 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 80 { "ebr", G_PART_ALIAS_EBR }, 81 { "efi", G_PART_ALIAS_EFI }, 82 { "fat16", G_PART_ALIAS_MS_FAT16 }, 83 { "fat32", G_PART_ALIAS_MS_FAT32 }, 84 { "freebsd", G_PART_ALIAS_FREEBSD }, 85 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 86 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS }, 87 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 88 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 89 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 90 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 91 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 92 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 93 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 94 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 95 { "mbr", G_PART_ALIAS_MBR }, 96 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 97 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 98 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 99 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 100 { "ntfs", G_PART_ALIAS_MS_NTFS }, 101 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 102 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 103 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 104 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 105 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 106 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 107 { "vmware-vmfs", G_PART_ALIAS_VMFS }, 108 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG }, 109 { "vmware-reserved", G_PART_ALIAS_VMRESERVED }, 110 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR }, 111 }; 112 113 SYSCTL_DECL(_kern_geom); 114 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, 115 "GEOM_PART stuff"); 116 static u_int check_integrity = 1; 117 TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity); 118 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 119 CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1, 120 "Enable integrity checking"); 121 122 /* 123 * The GEOM partitioning class. 124 */ 125 static g_ctl_req_t g_part_ctlreq; 126 static g_ctl_destroy_geom_t g_part_destroy_geom; 127 static g_fini_t g_part_fini; 128 static g_init_t g_part_init; 129 static g_taste_t g_part_taste; 130 131 static g_access_t g_part_access; 132 static g_dumpconf_t g_part_dumpconf; 133 static g_orphan_t g_part_orphan; 134 static g_spoiled_t g_part_spoiled; 135 static g_start_t g_part_start; 136 static g_resize_t g_part_resize; 137 138 static struct g_class g_part_class = { 139 .name = "PART", 140 .version = G_VERSION, 141 /* Class methods. */ 142 .ctlreq = g_part_ctlreq, 143 .destroy_geom = g_part_destroy_geom, 144 .fini = g_part_fini, 145 .init = g_part_init, 146 .taste = g_part_taste, 147 /* Geom methods. */ 148 .access = g_part_access, 149 .dumpconf = g_part_dumpconf, 150 .orphan = g_part_orphan, 151 .spoiled = g_part_spoiled, 152 .start = g_part_start, 153 .resize = g_part_resize 154 }; 155 156 DECLARE_GEOM_CLASS(g_part_class, g_part); 157 MODULE_VERSION(g_part, 0); 158 159 /* 160 * Support functions. 161 */ 162 163 static void g_part_wither(struct g_geom *, int); 164 165 const char * 166 g_part_alias_name(enum g_part_alias alias) 167 { 168 int i; 169 170 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 171 if (g_part_alias_list[i].alias != alias) 172 continue; 173 return (g_part_alias_list[i].lexeme); 174 } 175 176 return (NULL); 177 } 178 179 void 180 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 181 u_int *bestheads) 182 { 183 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 184 off_t chs, cylinders; 185 u_int heads; 186 int idx; 187 188 *bestchs = 0; 189 *bestheads = 0; 190 for (idx = 0; candidate_heads[idx] != 0; idx++) { 191 heads = candidate_heads[idx]; 192 cylinders = blocks / heads / sectors; 193 if (cylinders < heads || cylinders < sectors) 194 break; 195 if (cylinders > 1023) 196 continue; 197 chs = cylinders * heads * sectors; 198 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 199 *bestchs = chs; 200 *bestheads = heads; 201 } 202 } 203 } 204 205 static void 206 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 207 off_t blocks) 208 { 209 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 210 off_t chs, bestchs; 211 u_int heads, sectors; 212 int idx; 213 214 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 215 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 216 table->gpt_fixgeom = 0; 217 table->gpt_heads = 0; 218 table->gpt_sectors = 0; 219 bestchs = 0; 220 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 221 sectors = candidate_sectors[idx]; 222 g_part_geometry_heads(blocks, sectors, &chs, &heads); 223 if (chs == 0) 224 continue; 225 /* 226 * Prefer a geometry with sectors > 1, but only if 227 * it doesn't bump down the number of heads to 1. 228 */ 229 if (chs > bestchs || (chs == bestchs && heads > 1 && 230 table->gpt_sectors == 1)) { 231 bestchs = chs; 232 table->gpt_heads = heads; 233 table->gpt_sectors = sectors; 234 } 235 } 236 /* 237 * If we didn't find a geometry at all, then the disk is 238 * too big. This means we can use the maximum number of 239 * heads and sectors. 240 */ 241 if (bestchs == 0) { 242 table->gpt_heads = 255; 243 table->gpt_sectors = 63; 244 } 245 } else { 246 table->gpt_fixgeom = 1; 247 table->gpt_heads = heads; 248 table->gpt_sectors = sectors; 249 } 250 } 251 252 #define DPRINTF(...) if (bootverbose) { \ 253 printf("GEOM_PART: " __VA_ARGS__); \ 254 } 255 256 static int 257 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 258 { 259 struct g_part_entry *e1, *e2; 260 struct g_provider *pp; 261 off_t offset; 262 int failed; 263 264 failed = 0; 265 pp = cp->provider; 266 if (table->gpt_last < table->gpt_first) { 267 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 268 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 269 failed++; 270 } 271 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 272 DPRINTF("last LBA extends beyond mediasize: " 273 "%jd > %jd\n", (intmax_t)table->gpt_last, 274 (intmax_t)pp->mediasize / pp->sectorsize - 1); 275 failed++; 276 } 277 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 278 if (e1->gpe_deleted || e1->gpe_internal) 279 continue; 280 if (e1->gpe_start < table->gpt_first) { 281 DPRINTF("partition %d has start offset below first " 282 "LBA: %jd < %jd\n", e1->gpe_index, 283 (intmax_t)e1->gpe_start, 284 (intmax_t)table->gpt_first); 285 failed++; 286 } 287 if (e1->gpe_start > table->gpt_last) { 288 DPRINTF("partition %d has start offset beyond last " 289 "LBA: %jd > %jd\n", e1->gpe_index, 290 (intmax_t)e1->gpe_start, 291 (intmax_t)table->gpt_last); 292 failed++; 293 } 294 if (e1->gpe_end < e1->gpe_start) { 295 DPRINTF("partition %d has end offset below start " 296 "offset: %jd < %jd\n", e1->gpe_index, 297 (intmax_t)e1->gpe_end, 298 (intmax_t)e1->gpe_start); 299 failed++; 300 } 301 if (e1->gpe_end > table->gpt_last) { 302 DPRINTF("partition %d has end offset beyond last " 303 "LBA: %jd > %jd\n", e1->gpe_index, 304 (intmax_t)e1->gpe_end, 305 (intmax_t)table->gpt_last); 306 failed++; 307 } 308 if (pp->stripesize > 0) { 309 offset = e1->gpe_start * pp->sectorsize; 310 if (e1->gpe_offset > offset) 311 offset = e1->gpe_offset; 312 if ((offset + pp->stripeoffset) % pp->stripesize) { 313 DPRINTF("partition %d is not aligned on %u " 314 "bytes\n", e1->gpe_index, pp->stripesize); 315 /* Don't treat this as a critical failure */ 316 } 317 } 318 e2 = e1; 319 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 320 if (e2->gpe_deleted || e2->gpe_internal) 321 continue; 322 if (e1->gpe_start >= e2->gpe_start && 323 e1->gpe_start <= e2->gpe_end) { 324 DPRINTF("partition %d has start offset inside " 325 "partition %d: start[%d] %jd >= start[%d] " 326 "%jd <= end[%d] %jd\n", 327 e1->gpe_index, e2->gpe_index, 328 e2->gpe_index, (intmax_t)e2->gpe_start, 329 e1->gpe_index, (intmax_t)e1->gpe_start, 330 e2->gpe_index, (intmax_t)e2->gpe_end); 331 failed++; 332 } 333 if (e1->gpe_end >= e2->gpe_start && 334 e1->gpe_end <= e2->gpe_end) { 335 DPRINTF("partition %d has end offset inside " 336 "partition %d: start[%d] %jd >= end[%d] " 337 "%jd <= end[%d] %jd\n", 338 e1->gpe_index, e2->gpe_index, 339 e2->gpe_index, (intmax_t)e2->gpe_start, 340 e1->gpe_index, (intmax_t)e1->gpe_end, 341 e2->gpe_index, (intmax_t)e2->gpe_end); 342 failed++; 343 } 344 if (e1->gpe_start < e2->gpe_start && 345 e1->gpe_end > e2->gpe_end) { 346 DPRINTF("partition %d contains partition %d: " 347 "start[%d] %jd > start[%d] %jd, end[%d] " 348 "%jd < end[%d] %jd\n", 349 e1->gpe_index, e2->gpe_index, 350 e1->gpe_index, (intmax_t)e1->gpe_start, 351 e2->gpe_index, (intmax_t)e2->gpe_start, 352 e2->gpe_index, (intmax_t)e2->gpe_end, 353 e1->gpe_index, (intmax_t)e1->gpe_end); 354 failed++; 355 } 356 } 357 } 358 if (failed != 0) { 359 printf("GEOM_PART: integrity check failed (%s, %s)\n", 360 pp->name, table->gpt_scheme->name); 361 if (check_integrity != 0) 362 return (EINVAL); 363 table->gpt_corrupt = 1; 364 } 365 return (0); 366 } 367 #undef DPRINTF 368 369 struct g_part_entry * 370 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 371 quad_t end) 372 { 373 struct g_part_entry *entry, *last; 374 375 last = NULL; 376 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 377 if (entry->gpe_index == index) 378 break; 379 if (entry->gpe_index > index) { 380 entry = NULL; 381 break; 382 } 383 last = entry; 384 } 385 if (entry == NULL) { 386 entry = g_malloc(table->gpt_scheme->gps_entrysz, 387 M_WAITOK | M_ZERO); 388 entry->gpe_index = index; 389 if (last == NULL) 390 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 391 else 392 LIST_INSERT_AFTER(last, entry, gpe_entry); 393 } else 394 entry->gpe_offset = 0; 395 entry->gpe_start = start; 396 entry->gpe_end = end; 397 return (entry); 398 } 399 400 static void 401 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 402 struct g_part_entry *entry) 403 { 404 struct g_consumer *cp; 405 struct g_provider *pp; 406 struct sbuf *sb; 407 off_t offset; 408 409 cp = LIST_FIRST(&gp->consumer); 410 pp = cp->provider; 411 412 offset = entry->gpe_start * pp->sectorsize; 413 if (entry->gpe_offset < offset) 414 entry->gpe_offset = offset; 415 416 if (entry->gpe_pp == NULL) { 417 sb = sbuf_new_auto(); 418 G_PART_FULLNAME(table, entry, sb, gp->name); 419 sbuf_finish(sb); 420 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 421 sbuf_delete(sb); 422 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 423 entry->gpe_pp->private = entry; /* Close the circle. */ 424 } 425 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 426 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 427 pp->sectorsize; 428 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 429 entry->gpe_pp->sectorsize = pp->sectorsize; 430 entry->gpe_pp->stripesize = pp->stripesize; 431 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 432 if (pp->stripesize > 0) 433 entry->gpe_pp->stripeoffset %= pp->stripesize; 434 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED; 435 g_error_provider(entry->gpe_pp, 0); 436 } 437 438 static struct g_geom* 439 g_part_find_geom(const char *name) 440 { 441 struct g_geom *gp; 442 LIST_FOREACH(gp, &g_part_class.geom, geom) { 443 if (!strcmp(name, gp->name)) 444 break; 445 } 446 return (gp); 447 } 448 449 static int 450 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 451 { 452 struct g_geom *gp; 453 const char *gname; 454 455 gname = gctl_get_asciiparam(req, name); 456 if (gname == NULL) 457 return (ENOATTR); 458 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 459 gname += sizeof(_PATH_DEV) - 1; 460 gp = g_part_find_geom(gname); 461 if (gp == NULL) { 462 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 463 return (EINVAL); 464 } 465 if ((gp->flags & G_GEOM_WITHER) != 0) { 466 gctl_error(req, "%d %s", ENXIO, gname); 467 return (ENXIO); 468 } 469 *v = gp; 470 return (0); 471 } 472 473 static int 474 g_part_parm_provider(struct gctl_req *req, const char *name, 475 struct g_provider **v) 476 { 477 struct g_provider *pp; 478 const char *pname; 479 480 pname = gctl_get_asciiparam(req, name); 481 if (pname == NULL) 482 return (ENOATTR); 483 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 484 pname += sizeof(_PATH_DEV) - 1; 485 pp = g_provider_by_name(pname); 486 if (pp == NULL) { 487 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 488 return (EINVAL); 489 } 490 *v = pp; 491 return (0); 492 } 493 494 static int 495 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 496 { 497 const char *p; 498 char *x; 499 quad_t q; 500 501 p = gctl_get_asciiparam(req, name); 502 if (p == NULL) 503 return (ENOATTR); 504 q = strtoq(p, &x, 0); 505 if (*x != '\0' || q < 0) { 506 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 507 return (EINVAL); 508 } 509 *v = q; 510 return (0); 511 } 512 513 static int 514 g_part_parm_scheme(struct gctl_req *req, const char *name, 515 struct g_part_scheme **v) 516 { 517 struct g_part_scheme *s; 518 const char *p; 519 520 p = gctl_get_asciiparam(req, name); 521 if (p == NULL) 522 return (ENOATTR); 523 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 524 if (s == &g_part_null_scheme) 525 continue; 526 if (!strcasecmp(s->name, p)) 527 break; 528 } 529 if (s == NULL) { 530 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 531 return (EINVAL); 532 } 533 *v = s; 534 return (0); 535 } 536 537 static int 538 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 539 { 540 const char *p; 541 542 p = gctl_get_asciiparam(req, name); 543 if (p == NULL) 544 return (ENOATTR); 545 /* An empty label is always valid. */ 546 if (strcmp(name, "label") != 0 && p[0] == '\0') { 547 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 548 return (EINVAL); 549 } 550 *v = p; 551 return (0); 552 } 553 554 static int 555 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 556 { 557 const intmax_t *p; 558 int size; 559 560 p = gctl_get_param(req, name, &size); 561 if (p == NULL) 562 return (ENOATTR); 563 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 564 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 565 return (EINVAL); 566 } 567 *v = (u_int)*p; 568 return (0); 569 } 570 571 static int 572 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 573 { 574 const uint32_t *p; 575 int size; 576 577 p = gctl_get_param(req, name, &size); 578 if (p == NULL) 579 return (ENOATTR); 580 if (size != sizeof(*p) || *p > INT_MAX) { 581 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 582 return (EINVAL); 583 } 584 *v = (u_int)*p; 585 return (0); 586 } 587 588 static int 589 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 590 unsigned int *s) 591 { 592 const void *p; 593 int size; 594 595 p = gctl_get_param(req, name, &size); 596 if (p == NULL) 597 return (ENOATTR); 598 *v = p; 599 *s = size; 600 return (0); 601 } 602 603 static int 604 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 605 { 606 struct g_part_scheme *iter, *scheme; 607 struct g_part_table *table; 608 int pri, probe; 609 610 table = gp->softc; 611 scheme = (table != NULL) ? table->gpt_scheme : NULL; 612 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 613 if (pri == 0) 614 goto done; 615 if (pri > 0) { /* error */ 616 scheme = NULL; 617 pri = INT_MIN; 618 } 619 620 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 621 if (iter == &g_part_null_scheme) 622 continue; 623 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 624 M_WAITOK); 625 table->gpt_gp = gp; 626 table->gpt_scheme = iter; 627 table->gpt_depth = depth; 628 probe = G_PART_PROBE(table, cp); 629 if (probe <= 0 && probe > pri) { 630 pri = probe; 631 scheme = iter; 632 if (gp->softc != NULL) 633 kobj_delete((kobj_t)gp->softc, M_GEOM); 634 gp->softc = table; 635 if (pri == 0) 636 goto done; 637 } else 638 kobj_delete((kobj_t)table, M_GEOM); 639 } 640 641 done: 642 return ((scheme == NULL) ? ENXIO : 0); 643 } 644 645 /* 646 * Control request functions. 647 */ 648 649 static int 650 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 651 { 652 struct g_geom *gp; 653 struct g_provider *pp; 654 struct g_part_entry *delent, *last, *entry; 655 struct g_part_table *table; 656 struct sbuf *sb; 657 quad_t end; 658 unsigned int index; 659 int error; 660 661 gp = gpp->gpp_geom; 662 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 663 g_topology_assert(); 664 665 pp = LIST_FIRST(&gp->consumer)->provider; 666 table = gp->softc; 667 end = gpp->gpp_start + gpp->gpp_size - 1; 668 669 if (gpp->gpp_start < table->gpt_first || 670 gpp->gpp_start > table->gpt_last) { 671 gctl_error(req, "%d start '%jd'", EINVAL, 672 (intmax_t)gpp->gpp_start); 673 return (EINVAL); 674 } 675 if (end < gpp->gpp_start || end > table->gpt_last) { 676 gctl_error(req, "%d size '%jd'", EINVAL, 677 (intmax_t)gpp->gpp_size); 678 return (EINVAL); 679 } 680 if (gpp->gpp_index > table->gpt_entries) { 681 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 682 return (EINVAL); 683 } 684 685 delent = last = NULL; 686 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 687 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 688 if (entry->gpe_deleted) { 689 if (entry->gpe_index == index) 690 delent = entry; 691 continue; 692 } 693 if (entry->gpe_index == index) 694 index = entry->gpe_index + 1; 695 if (entry->gpe_index < index) 696 last = entry; 697 if (entry->gpe_internal) 698 continue; 699 if (gpp->gpp_start >= entry->gpe_start && 700 gpp->gpp_start <= entry->gpe_end) { 701 gctl_error(req, "%d start '%jd'", ENOSPC, 702 (intmax_t)gpp->gpp_start); 703 return (ENOSPC); 704 } 705 if (end >= entry->gpe_start && end <= entry->gpe_end) { 706 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 707 return (ENOSPC); 708 } 709 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 710 gctl_error(req, "%d size '%jd'", ENOSPC, 711 (intmax_t)gpp->gpp_size); 712 return (ENOSPC); 713 } 714 } 715 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 716 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 717 return (EEXIST); 718 } 719 if (index > table->gpt_entries) { 720 gctl_error(req, "%d index '%d'", ENOSPC, index); 721 return (ENOSPC); 722 } 723 724 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 725 M_WAITOK | M_ZERO) : delent; 726 entry->gpe_index = index; 727 entry->gpe_start = gpp->gpp_start; 728 entry->gpe_end = end; 729 error = G_PART_ADD(table, entry, gpp); 730 if (error) { 731 gctl_error(req, "%d", error); 732 if (delent == NULL) 733 g_free(entry); 734 return (error); 735 } 736 if (delent == NULL) { 737 if (last == NULL) 738 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 739 else 740 LIST_INSERT_AFTER(last, entry, gpe_entry); 741 entry->gpe_created = 1; 742 } else { 743 entry->gpe_deleted = 0; 744 entry->gpe_modified = 1; 745 } 746 g_part_new_provider(gp, table, entry); 747 748 /* Provide feedback if so requested. */ 749 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 750 sb = sbuf_new_auto(); 751 G_PART_FULLNAME(table, entry, sb, gp->name); 752 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 753 sbuf_printf(sb, " added, but partition is not " 754 "aligned on %u bytes\n", pp->stripesize); 755 else 756 sbuf_cat(sb, " added\n"); 757 sbuf_finish(sb); 758 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 759 sbuf_delete(sb); 760 } 761 return (0); 762 } 763 764 static int 765 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 766 { 767 struct g_geom *gp; 768 struct g_part_table *table; 769 struct sbuf *sb; 770 int error, sz; 771 772 gp = gpp->gpp_geom; 773 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 774 g_topology_assert(); 775 776 table = gp->softc; 777 sz = table->gpt_scheme->gps_bootcodesz; 778 if (sz == 0) { 779 error = ENODEV; 780 goto fail; 781 } 782 if (gpp->gpp_codesize > sz) { 783 error = EFBIG; 784 goto fail; 785 } 786 787 error = G_PART_BOOTCODE(table, gpp); 788 if (error) 789 goto fail; 790 791 /* Provide feedback if so requested. */ 792 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 793 sb = sbuf_new_auto(); 794 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 795 sbuf_finish(sb); 796 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 797 sbuf_delete(sb); 798 } 799 return (0); 800 801 fail: 802 gctl_error(req, "%d", error); 803 return (error); 804 } 805 806 static int 807 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 808 { 809 struct g_consumer *cp; 810 struct g_geom *gp; 811 struct g_provider *pp; 812 struct g_part_entry *entry, *tmp; 813 struct g_part_table *table; 814 char *buf; 815 int error, i; 816 817 gp = gpp->gpp_geom; 818 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 819 g_topology_assert(); 820 821 table = gp->softc; 822 if (!table->gpt_opened) { 823 gctl_error(req, "%d", EPERM); 824 return (EPERM); 825 } 826 827 g_topology_unlock(); 828 829 cp = LIST_FIRST(&gp->consumer); 830 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 831 pp = cp->provider; 832 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 833 while (table->gpt_smhead != 0) { 834 i = ffs(table->gpt_smhead) - 1; 835 error = g_write_data(cp, i * pp->sectorsize, buf, 836 pp->sectorsize); 837 if (error) { 838 g_free(buf); 839 goto fail; 840 } 841 table->gpt_smhead &= ~(1 << i); 842 } 843 while (table->gpt_smtail != 0) { 844 i = ffs(table->gpt_smtail) - 1; 845 error = g_write_data(cp, pp->mediasize - (i + 1) * 846 pp->sectorsize, buf, pp->sectorsize); 847 if (error) { 848 g_free(buf); 849 goto fail; 850 } 851 table->gpt_smtail &= ~(1 << i); 852 } 853 g_free(buf); 854 } 855 856 if (table->gpt_scheme == &g_part_null_scheme) { 857 g_topology_lock(); 858 g_access(cp, -1, -1, -1); 859 g_part_wither(gp, ENXIO); 860 return (0); 861 } 862 863 error = G_PART_WRITE(table, cp); 864 if (error) 865 goto fail; 866 867 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 868 if (!entry->gpe_deleted) { 869 entry->gpe_created = 0; 870 entry->gpe_modified = 0; 871 continue; 872 } 873 LIST_REMOVE(entry, gpe_entry); 874 g_free(entry); 875 } 876 table->gpt_created = 0; 877 table->gpt_opened = 0; 878 879 g_topology_lock(); 880 g_access(cp, -1, -1, -1); 881 return (0); 882 883 fail: 884 g_topology_lock(); 885 gctl_error(req, "%d", error); 886 return (error); 887 } 888 889 static int 890 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 891 { 892 struct g_consumer *cp; 893 struct g_geom *gp; 894 struct g_provider *pp; 895 struct g_part_scheme *scheme; 896 struct g_part_table *null, *table; 897 struct sbuf *sb; 898 int attr, error; 899 900 pp = gpp->gpp_provider; 901 scheme = gpp->gpp_scheme; 902 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 903 g_topology_assert(); 904 905 /* Check that there isn't already a g_part geom on the provider. */ 906 gp = g_part_find_geom(pp->name); 907 if (gp != NULL) { 908 null = gp->softc; 909 if (null->gpt_scheme != &g_part_null_scheme) { 910 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 911 return (EEXIST); 912 } 913 } else 914 null = NULL; 915 916 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 917 (gpp->gpp_entries < scheme->gps_minent || 918 gpp->gpp_entries > scheme->gps_maxent)) { 919 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 920 return (EINVAL); 921 } 922 923 if (null == NULL) 924 gp = g_new_geomf(&g_part_class, "%s", pp->name); 925 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 926 M_WAITOK); 927 table = gp->softc; 928 table->gpt_gp = gp; 929 table->gpt_scheme = gpp->gpp_scheme; 930 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 931 gpp->gpp_entries : scheme->gps_minent; 932 LIST_INIT(&table->gpt_entry); 933 if (null == NULL) { 934 cp = g_new_consumer(gp); 935 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 936 error = g_attach(cp, pp); 937 if (error == 0) 938 error = g_access(cp, 1, 1, 1); 939 if (error != 0) { 940 g_part_wither(gp, error); 941 gctl_error(req, "%d geom '%s'", error, pp->name); 942 return (error); 943 } 944 table->gpt_opened = 1; 945 } else { 946 cp = LIST_FIRST(&gp->consumer); 947 table->gpt_opened = null->gpt_opened; 948 table->gpt_smhead = null->gpt_smhead; 949 table->gpt_smtail = null->gpt_smtail; 950 } 951 952 g_topology_unlock(); 953 954 /* Make sure the provider has media. */ 955 if (pp->mediasize == 0 || pp->sectorsize == 0) { 956 error = ENODEV; 957 goto fail; 958 } 959 960 /* Make sure we can nest and if so, determine our depth. */ 961 error = g_getattr("PART::isleaf", cp, &attr); 962 if (!error && attr) { 963 error = ENODEV; 964 goto fail; 965 } 966 error = g_getattr("PART::depth", cp, &attr); 967 table->gpt_depth = (!error) ? attr + 1 : 0; 968 969 /* 970 * Synthesize a disk geometry. Some partitioning schemes 971 * depend on it and since some file systems need it even 972 * when the partitition scheme doesn't, we do it here in 973 * scheme-independent code. 974 */ 975 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 976 977 error = G_PART_CREATE(table, gpp); 978 if (error) 979 goto fail; 980 981 g_topology_lock(); 982 983 table->gpt_created = 1; 984 if (null != NULL) 985 kobj_delete((kobj_t)null, M_GEOM); 986 987 /* 988 * Support automatic commit by filling in the gpp_geom 989 * parameter. 990 */ 991 gpp->gpp_parms |= G_PART_PARM_GEOM; 992 gpp->gpp_geom = gp; 993 994 /* Provide feedback if so requested. */ 995 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 996 sb = sbuf_new_auto(); 997 sbuf_printf(sb, "%s created\n", gp->name); 998 sbuf_finish(sb); 999 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1000 sbuf_delete(sb); 1001 } 1002 return (0); 1003 1004 fail: 1005 g_topology_lock(); 1006 if (null == NULL) { 1007 g_access(cp, -1, -1, -1); 1008 g_part_wither(gp, error); 1009 } else { 1010 kobj_delete((kobj_t)gp->softc, M_GEOM); 1011 gp->softc = null; 1012 } 1013 gctl_error(req, "%d provider", error); 1014 return (error); 1015 } 1016 1017 static int 1018 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1019 { 1020 struct g_geom *gp; 1021 struct g_provider *pp; 1022 struct g_part_entry *entry; 1023 struct g_part_table *table; 1024 struct sbuf *sb; 1025 1026 gp = gpp->gpp_geom; 1027 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1028 g_topology_assert(); 1029 1030 table = gp->softc; 1031 1032 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1033 if (entry->gpe_deleted || entry->gpe_internal) 1034 continue; 1035 if (entry->gpe_index == gpp->gpp_index) 1036 break; 1037 } 1038 if (entry == NULL) { 1039 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1040 return (ENOENT); 1041 } 1042 1043 pp = entry->gpe_pp; 1044 if (pp != NULL) { 1045 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1046 gctl_error(req, "%d", EBUSY); 1047 return (EBUSY); 1048 } 1049 1050 pp->private = NULL; 1051 entry->gpe_pp = NULL; 1052 } 1053 1054 if (pp != NULL) 1055 g_wither_provider(pp, ENXIO); 1056 1057 /* Provide feedback if so requested. */ 1058 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1059 sb = sbuf_new_auto(); 1060 G_PART_FULLNAME(table, entry, sb, gp->name); 1061 sbuf_cat(sb, " deleted\n"); 1062 sbuf_finish(sb); 1063 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1064 sbuf_delete(sb); 1065 } 1066 1067 if (entry->gpe_created) { 1068 LIST_REMOVE(entry, gpe_entry); 1069 g_free(entry); 1070 } else { 1071 entry->gpe_modified = 0; 1072 entry->gpe_deleted = 1; 1073 } 1074 return (0); 1075 } 1076 1077 static int 1078 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1079 { 1080 struct g_consumer *cp; 1081 struct g_geom *gp; 1082 struct g_provider *pp; 1083 struct g_part_entry *entry, *tmp; 1084 struct g_part_table *null, *table; 1085 struct sbuf *sb; 1086 int error; 1087 1088 gp = gpp->gpp_geom; 1089 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1090 g_topology_assert(); 1091 1092 table = gp->softc; 1093 /* Check for busy providers. */ 1094 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1095 if (entry->gpe_deleted || entry->gpe_internal) 1096 continue; 1097 if (gpp->gpp_force) { 1098 pp = entry->gpe_pp; 1099 if (pp == NULL) 1100 continue; 1101 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1102 continue; 1103 } 1104 gctl_error(req, "%d", EBUSY); 1105 return (EBUSY); 1106 } 1107 1108 if (gpp->gpp_force) { 1109 /* Destroy all providers. */ 1110 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1111 pp = entry->gpe_pp; 1112 if (pp != NULL) { 1113 pp->private = NULL; 1114 g_wither_provider(pp, ENXIO); 1115 } 1116 LIST_REMOVE(entry, gpe_entry); 1117 g_free(entry); 1118 } 1119 } 1120 1121 error = G_PART_DESTROY(table, gpp); 1122 if (error) { 1123 gctl_error(req, "%d", error); 1124 return (error); 1125 } 1126 1127 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1128 M_WAITOK); 1129 null = gp->softc; 1130 null->gpt_gp = gp; 1131 null->gpt_scheme = &g_part_null_scheme; 1132 LIST_INIT(&null->gpt_entry); 1133 1134 cp = LIST_FIRST(&gp->consumer); 1135 pp = cp->provider; 1136 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1137 1138 null->gpt_depth = table->gpt_depth; 1139 null->gpt_opened = table->gpt_opened; 1140 null->gpt_smhead = table->gpt_smhead; 1141 null->gpt_smtail = table->gpt_smtail; 1142 1143 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1144 LIST_REMOVE(entry, gpe_entry); 1145 g_free(entry); 1146 } 1147 kobj_delete((kobj_t)table, M_GEOM); 1148 1149 /* Provide feedback if so requested. */ 1150 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1151 sb = sbuf_new_auto(); 1152 sbuf_printf(sb, "%s destroyed\n", gp->name); 1153 sbuf_finish(sb); 1154 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1155 sbuf_delete(sb); 1156 } 1157 return (0); 1158 } 1159 1160 static int 1161 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1162 { 1163 struct g_geom *gp; 1164 struct g_part_entry *entry; 1165 struct g_part_table *table; 1166 struct sbuf *sb; 1167 int error; 1168 1169 gp = gpp->gpp_geom; 1170 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1171 g_topology_assert(); 1172 1173 table = gp->softc; 1174 1175 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1176 if (entry->gpe_deleted || entry->gpe_internal) 1177 continue; 1178 if (entry->gpe_index == gpp->gpp_index) 1179 break; 1180 } 1181 if (entry == NULL) { 1182 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1183 return (ENOENT); 1184 } 1185 1186 error = G_PART_MODIFY(table, entry, gpp); 1187 if (error) { 1188 gctl_error(req, "%d", error); 1189 return (error); 1190 } 1191 1192 if (!entry->gpe_created) 1193 entry->gpe_modified = 1; 1194 1195 /* Provide feedback if so requested. */ 1196 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1197 sb = sbuf_new_auto(); 1198 G_PART_FULLNAME(table, entry, sb, gp->name); 1199 sbuf_cat(sb, " modified\n"); 1200 sbuf_finish(sb); 1201 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1202 sbuf_delete(sb); 1203 } 1204 return (0); 1205 } 1206 1207 static int 1208 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1209 { 1210 gctl_error(req, "%d verb 'move'", ENOSYS); 1211 return (ENOSYS); 1212 } 1213 1214 static int 1215 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1216 { 1217 struct g_part_table *table; 1218 struct g_geom *gp; 1219 struct sbuf *sb; 1220 int error, recovered; 1221 1222 gp = gpp->gpp_geom; 1223 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1224 g_topology_assert(); 1225 table = gp->softc; 1226 error = recovered = 0; 1227 1228 if (table->gpt_corrupt) { 1229 error = G_PART_RECOVER(table); 1230 if (error == 0) 1231 error = g_part_check_integrity(table, 1232 LIST_FIRST(&gp->consumer)); 1233 if (error) { 1234 gctl_error(req, "%d recovering '%s' failed", 1235 error, gp->name); 1236 return (error); 1237 } 1238 recovered = 1; 1239 } 1240 /* Provide feedback if so requested. */ 1241 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1242 sb = sbuf_new_auto(); 1243 if (recovered) 1244 sbuf_printf(sb, "%s recovered\n", gp->name); 1245 else 1246 sbuf_printf(sb, "%s recovering is not needed\n", 1247 gp->name); 1248 sbuf_finish(sb); 1249 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1250 sbuf_delete(sb); 1251 } 1252 return (0); 1253 } 1254 1255 static int 1256 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1257 { 1258 struct g_geom *gp; 1259 struct g_provider *pp; 1260 struct g_part_entry *pe, *entry; 1261 struct g_part_table *table; 1262 struct sbuf *sb; 1263 quad_t end; 1264 int error; 1265 off_t mediasize; 1266 1267 gp = gpp->gpp_geom; 1268 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1269 g_topology_assert(); 1270 table = gp->softc; 1271 1272 /* check gpp_index */ 1273 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1274 if (entry->gpe_deleted || entry->gpe_internal) 1275 continue; 1276 if (entry->gpe_index == gpp->gpp_index) 1277 break; 1278 } 1279 if (entry == NULL) { 1280 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1281 return (ENOENT); 1282 } 1283 1284 /* check gpp_size */ 1285 end = entry->gpe_start + gpp->gpp_size - 1; 1286 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1287 gctl_error(req, "%d size '%jd'", EINVAL, 1288 (intmax_t)gpp->gpp_size); 1289 return (EINVAL); 1290 } 1291 1292 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1293 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1294 continue; 1295 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1296 gctl_error(req, "%d end '%jd'", ENOSPC, 1297 (intmax_t)end); 1298 return (ENOSPC); 1299 } 1300 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1301 gctl_error(req, "%d size '%jd'", ENOSPC, 1302 (intmax_t)gpp->gpp_size); 1303 return (ENOSPC); 1304 } 1305 } 1306 1307 pp = entry->gpe_pp; 1308 if ((g_debugflags & 16) == 0 && 1309 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1310 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) { 1311 /* Deny shrinking of an opened partition. */ 1312 gctl_error(req, "%d", EBUSY); 1313 return (EBUSY); 1314 } 1315 } 1316 1317 error = G_PART_RESIZE(table, entry, gpp); 1318 if (error) { 1319 gctl_error(req, "%d%s", error, error != EBUSY ? "": 1320 " resizing will lead to unexpected shrinking" 1321 " due to alignment"); 1322 return (error); 1323 } 1324 1325 if (!entry->gpe_created) 1326 entry->gpe_modified = 1; 1327 1328 /* update mediasize of changed provider */ 1329 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1330 pp->sectorsize; 1331 g_resize_provider(pp, mediasize); 1332 1333 /* Provide feedback if so requested. */ 1334 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1335 sb = sbuf_new_auto(); 1336 G_PART_FULLNAME(table, entry, sb, gp->name); 1337 sbuf_cat(sb, " resized\n"); 1338 sbuf_finish(sb); 1339 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1340 sbuf_delete(sb); 1341 } 1342 return (0); 1343 } 1344 1345 static int 1346 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1347 unsigned int set) 1348 { 1349 struct g_geom *gp; 1350 struct g_part_entry *entry; 1351 struct g_part_table *table; 1352 struct sbuf *sb; 1353 int error; 1354 1355 gp = gpp->gpp_geom; 1356 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1357 g_topology_assert(); 1358 1359 table = gp->softc; 1360 1361 if (gpp->gpp_parms & G_PART_PARM_INDEX) { 1362 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1363 if (entry->gpe_deleted || entry->gpe_internal) 1364 continue; 1365 if (entry->gpe_index == gpp->gpp_index) 1366 break; 1367 } 1368 if (entry == NULL) { 1369 gctl_error(req, "%d index '%d'", ENOENT, 1370 gpp->gpp_index); 1371 return (ENOENT); 1372 } 1373 } else 1374 entry = NULL; 1375 1376 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1377 if (error) { 1378 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1379 return (error); 1380 } 1381 1382 /* Provide feedback if so requested. */ 1383 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1384 sb = sbuf_new_auto(); 1385 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1386 (set) ? "" : "un"); 1387 if (entry) 1388 G_PART_FULLNAME(table, entry, sb, gp->name); 1389 else 1390 sbuf_cat(sb, gp->name); 1391 sbuf_cat(sb, "\n"); 1392 sbuf_finish(sb); 1393 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1394 sbuf_delete(sb); 1395 } 1396 return (0); 1397 } 1398 1399 static int 1400 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1401 { 1402 struct g_consumer *cp; 1403 struct g_provider *pp; 1404 struct g_geom *gp; 1405 struct g_part_entry *entry, *tmp; 1406 struct g_part_table *table; 1407 int error, reprobe; 1408 1409 gp = gpp->gpp_geom; 1410 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1411 g_topology_assert(); 1412 1413 table = gp->softc; 1414 if (!table->gpt_opened) { 1415 gctl_error(req, "%d", EPERM); 1416 return (EPERM); 1417 } 1418 1419 cp = LIST_FIRST(&gp->consumer); 1420 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1421 entry->gpe_modified = 0; 1422 if (entry->gpe_created) { 1423 pp = entry->gpe_pp; 1424 if (pp != NULL) { 1425 pp->private = NULL; 1426 entry->gpe_pp = NULL; 1427 g_wither_provider(pp, ENXIO); 1428 } 1429 entry->gpe_deleted = 1; 1430 } 1431 if (entry->gpe_deleted) { 1432 LIST_REMOVE(entry, gpe_entry); 1433 g_free(entry); 1434 } 1435 } 1436 1437 g_topology_unlock(); 1438 1439 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1440 table->gpt_created) ? 1 : 0; 1441 1442 if (reprobe) { 1443 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1444 if (entry->gpe_internal) 1445 continue; 1446 error = EBUSY; 1447 goto fail; 1448 } 1449 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1450 LIST_REMOVE(entry, gpe_entry); 1451 g_free(entry); 1452 } 1453 error = g_part_probe(gp, cp, table->gpt_depth); 1454 if (error) { 1455 g_topology_lock(); 1456 g_access(cp, -1, -1, -1); 1457 g_part_wither(gp, error); 1458 return (0); 1459 } 1460 table = gp->softc; 1461 1462 /* 1463 * Synthesize a disk geometry. Some partitioning schemes 1464 * depend on it and since some file systems need it even 1465 * when the partitition scheme doesn't, we do it here in 1466 * scheme-independent code. 1467 */ 1468 pp = cp->provider; 1469 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1470 } 1471 1472 error = G_PART_READ(table, cp); 1473 if (error) 1474 goto fail; 1475 error = g_part_check_integrity(table, cp); 1476 if (error) 1477 goto fail; 1478 1479 g_topology_lock(); 1480 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1481 if (!entry->gpe_internal) 1482 g_part_new_provider(gp, table, entry); 1483 } 1484 1485 table->gpt_opened = 0; 1486 g_access(cp, -1, -1, -1); 1487 return (0); 1488 1489 fail: 1490 g_topology_lock(); 1491 gctl_error(req, "%d", error); 1492 return (error); 1493 } 1494 1495 static void 1496 g_part_wither(struct g_geom *gp, int error) 1497 { 1498 struct g_part_entry *entry; 1499 struct g_part_table *table; 1500 1501 table = gp->softc; 1502 if (table != NULL) { 1503 G_PART_DESTROY(table, NULL); 1504 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1505 LIST_REMOVE(entry, gpe_entry); 1506 g_free(entry); 1507 } 1508 if (gp->softc != NULL) { 1509 kobj_delete((kobj_t)gp->softc, M_GEOM); 1510 gp->softc = NULL; 1511 } 1512 } 1513 g_wither_geom(gp, error); 1514 } 1515 1516 /* 1517 * Class methods. 1518 */ 1519 1520 static void 1521 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1522 { 1523 struct g_part_parms gpp; 1524 struct g_part_table *table; 1525 struct gctl_req_arg *ap; 1526 enum g_part_ctl ctlreq; 1527 unsigned int i, mparms, oparms, parm; 1528 int auto_commit, close_on_error; 1529 int error, modifies; 1530 1531 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1532 g_topology_assert(); 1533 1534 ctlreq = G_PART_CTL_NONE; 1535 modifies = 1; 1536 mparms = 0; 1537 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1538 switch (*verb) { 1539 case 'a': 1540 if (!strcmp(verb, "add")) { 1541 ctlreq = G_PART_CTL_ADD; 1542 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1543 G_PART_PARM_START | G_PART_PARM_TYPE; 1544 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1545 } 1546 break; 1547 case 'b': 1548 if (!strcmp(verb, "bootcode")) { 1549 ctlreq = G_PART_CTL_BOOTCODE; 1550 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1551 } 1552 break; 1553 case 'c': 1554 if (!strcmp(verb, "commit")) { 1555 ctlreq = G_PART_CTL_COMMIT; 1556 mparms |= G_PART_PARM_GEOM; 1557 modifies = 0; 1558 } else if (!strcmp(verb, "create")) { 1559 ctlreq = G_PART_CTL_CREATE; 1560 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1561 oparms |= G_PART_PARM_ENTRIES; 1562 } 1563 break; 1564 case 'd': 1565 if (!strcmp(verb, "delete")) { 1566 ctlreq = G_PART_CTL_DELETE; 1567 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1568 } else if (!strcmp(verb, "destroy")) { 1569 ctlreq = G_PART_CTL_DESTROY; 1570 mparms |= G_PART_PARM_GEOM; 1571 oparms |= G_PART_PARM_FORCE; 1572 } 1573 break; 1574 case 'm': 1575 if (!strcmp(verb, "modify")) { 1576 ctlreq = G_PART_CTL_MODIFY; 1577 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1578 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1579 } else if (!strcmp(verb, "move")) { 1580 ctlreq = G_PART_CTL_MOVE; 1581 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1582 } 1583 break; 1584 case 'r': 1585 if (!strcmp(verb, "recover")) { 1586 ctlreq = G_PART_CTL_RECOVER; 1587 mparms |= G_PART_PARM_GEOM; 1588 } else if (!strcmp(verb, "resize")) { 1589 ctlreq = G_PART_CTL_RESIZE; 1590 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1591 G_PART_PARM_SIZE; 1592 } 1593 break; 1594 case 's': 1595 if (!strcmp(verb, "set")) { 1596 ctlreq = G_PART_CTL_SET; 1597 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1598 oparms |= G_PART_PARM_INDEX; 1599 } 1600 break; 1601 case 'u': 1602 if (!strcmp(verb, "undo")) { 1603 ctlreq = G_PART_CTL_UNDO; 1604 mparms |= G_PART_PARM_GEOM; 1605 modifies = 0; 1606 } else if (!strcmp(verb, "unset")) { 1607 ctlreq = G_PART_CTL_UNSET; 1608 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1609 oparms |= G_PART_PARM_INDEX; 1610 } 1611 break; 1612 } 1613 if (ctlreq == G_PART_CTL_NONE) { 1614 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1615 return; 1616 } 1617 1618 bzero(&gpp, sizeof(gpp)); 1619 for (i = 0; i < req->narg; i++) { 1620 ap = &req->arg[i]; 1621 parm = 0; 1622 switch (ap->name[0]) { 1623 case 'a': 1624 if (!strcmp(ap->name, "arg0")) { 1625 parm = mparms & 1626 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1627 } 1628 if (!strcmp(ap->name, "attrib")) 1629 parm = G_PART_PARM_ATTRIB; 1630 break; 1631 case 'b': 1632 if (!strcmp(ap->name, "bootcode")) 1633 parm = G_PART_PARM_BOOTCODE; 1634 break; 1635 case 'c': 1636 if (!strcmp(ap->name, "class")) 1637 continue; 1638 break; 1639 case 'e': 1640 if (!strcmp(ap->name, "entries")) 1641 parm = G_PART_PARM_ENTRIES; 1642 break; 1643 case 'f': 1644 if (!strcmp(ap->name, "flags")) 1645 parm = G_PART_PARM_FLAGS; 1646 else if (!strcmp(ap->name, "force")) 1647 parm = G_PART_PARM_FORCE; 1648 break; 1649 case 'i': 1650 if (!strcmp(ap->name, "index")) 1651 parm = G_PART_PARM_INDEX; 1652 break; 1653 case 'l': 1654 if (!strcmp(ap->name, "label")) 1655 parm = G_PART_PARM_LABEL; 1656 break; 1657 case 'o': 1658 if (!strcmp(ap->name, "output")) 1659 parm = G_PART_PARM_OUTPUT; 1660 break; 1661 case 's': 1662 if (!strcmp(ap->name, "scheme")) 1663 parm = G_PART_PARM_SCHEME; 1664 else if (!strcmp(ap->name, "size")) 1665 parm = G_PART_PARM_SIZE; 1666 else if (!strcmp(ap->name, "start")) 1667 parm = G_PART_PARM_START; 1668 break; 1669 case 't': 1670 if (!strcmp(ap->name, "type")) 1671 parm = G_PART_PARM_TYPE; 1672 break; 1673 case 'v': 1674 if (!strcmp(ap->name, "verb")) 1675 continue; 1676 else if (!strcmp(ap->name, "version")) 1677 parm = G_PART_PARM_VERSION; 1678 break; 1679 } 1680 if ((parm & (mparms | oparms)) == 0) { 1681 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1682 return; 1683 } 1684 switch (parm) { 1685 case G_PART_PARM_ATTRIB: 1686 error = g_part_parm_str(req, ap->name, 1687 &gpp.gpp_attrib); 1688 break; 1689 case G_PART_PARM_BOOTCODE: 1690 error = g_part_parm_bootcode(req, ap->name, 1691 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1692 break; 1693 case G_PART_PARM_ENTRIES: 1694 error = g_part_parm_intmax(req, ap->name, 1695 &gpp.gpp_entries); 1696 break; 1697 case G_PART_PARM_FLAGS: 1698 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1699 break; 1700 case G_PART_PARM_FORCE: 1701 error = g_part_parm_uint32(req, ap->name, 1702 &gpp.gpp_force); 1703 break; 1704 case G_PART_PARM_GEOM: 1705 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1706 break; 1707 case G_PART_PARM_INDEX: 1708 error = g_part_parm_intmax(req, ap->name, 1709 &gpp.gpp_index); 1710 break; 1711 case G_PART_PARM_LABEL: 1712 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1713 break; 1714 case G_PART_PARM_OUTPUT: 1715 error = 0; /* Write-only parameter */ 1716 break; 1717 case G_PART_PARM_PROVIDER: 1718 error = g_part_parm_provider(req, ap->name, 1719 &gpp.gpp_provider); 1720 break; 1721 case G_PART_PARM_SCHEME: 1722 error = g_part_parm_scheme(req, ap->name, 1723 &gpp.gpp_scheme); 1724 break; 1725 case G_PART_PARM_SIZE: 1726 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1727 break; 1728 case G_PART_PARM_START: 1729 error = g_part_parm_quad(req, ap->name, 1730 &gpp.gpp_start); 1731 break; 1732 case G_PART_PARM_TYPE: 1733 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1734 break; 1735 case G_PART_PARM_VERSION: 1736 error = g_part_parm_uint32(req, ap->name, 1737 &gpp.gpp_version); 1738 break; 1739 default: 1740 error = EDOOFUS; 1741 gctl_error(req, "%d %s", error, ap->name); 1742 break; 1743 } 1744 if (error != 0) { 1745 if (error == ENOATTR) { 1746 gctl_error(req, "%d param '%s'", error, 1747 ap->name); 1748 } 1749 return; 1750 } 1751 gpp.gpp_parms |= parm; 1752 } 1753 if ((gpp.gpp_parms & mparms) != mparms) { 1754 parm = mparms - (gpp.gpp_parms & mparms); 1755 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1756 return; 1757 } 1758 1759 /* Obtain permissions if possible/necessary. */ 1760 close_on_error = 0; 1761 table = NULL; 1762 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1763 table = gpp.gpp_geom->softc; 1764 if (table != NULL && table->gpt_corrupt && 1765 ctlreq != G_PART_CTL_DESTROY && 1766 ctlreq != G_PART_CTL_RECOVER) { 1767 gctl_error(req, "%d table '%s' is corrupt", 1768 EPERM, gpp.gpp_geom->name); 1769 return; 1770 } 1771 if (table != NULL && !table->gpt_opened) { 1772 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1773 1, 1, 1); 1774 if (error) { 1775 gctl_error(req, "%d geom '%s'", error, 1776 gpp.gpp_geom->name); 1777 return; 1778 } 1779 table->gpt_opened = 1; 1780 close_on_error = 1; 1781 } 1782 } 1783 1784 /* Allow the scheme to check or modify the parameters. */ 1785 if (table != NULL) { 1786 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1787 if (error) { 1788 gctl_error(req, "%d pre-check failed", error); 1789 goto out; 1790 } 1791 } else 1792 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1793 1794 switch (ctlreq) { 1795 case G_PART_CTL_NONE: 1796 panic("%s", __func__); 1797 case G_PART_CTL_ADD: 1798 error = g_part_ctl_add(req, &gpp); 1799 break; 1800 case G_PART_CTL_BOOTCODE: 1801 error = g_part_ctl_bootcode(req, &gpp); 1802 break; 1803 case G_PART_CTL_COMMIT: 1804 error = g_part_ctl_commit(req, &gpp); 1805 break; 1806 case G_PART_CTL_CREATE: 1807 error = g_part_ctl_create(req, &gpp); 1808 break; 1809 case G_PART_CTL_DELETE: 1810 error = g_part_ctl_delete(req, &gpp); 1811 break; 1812 case G_PART_CTL_DESTROY: 1813 error = g_part_ctl_destroy(req, &gpp); 1814 break; 1815 case G_PART_CTL_MODIFY: 1816 error = g_part_ctl_modify(req, &gpp); 1817 break; 1818 case G_PART_CTL_MOVE: 1819 error = g_part_ctl_move(req, &gpp); 1820 break; 1821 case G_PART_CTL_RECOVER: 1822 error = g_part_ctl_recover(req, &gpp); 1823 break; 1824 case G_PART_CTL_RESIZE: 1825 error = g_part_ctl_resize(req, &gpp); 1826 break; 1827 case G_PART_CTL_SET: 1828 error = g_part_ctl_setunset(req, &gpp, 1); 1829 break; 1830 case G_PART_CTL_UNDO: 1831 error = g_part_ctl_undo(req, &gpp); 1832 break; 1833 case G_PART_CTL_UNSET: 1834 error = g_part_ctl_setunset(req, &gpp, 0); 1835 break; 1836 } 1837 1838 /* Implement automatic commit. */ 1839 if (!error) { 1840 auto_commit = (modifies && 1841 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1842 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1843 if (auto_commit) { 1844 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1845 __func__)); 1846 error = g_part_ctl_commit(req, &gpp); 1847 } 1848 } 1849 1850 out: 1851 if (error && close_on_error) { 1852 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1853 table->gpt_opened = 0; 1854 } 1855 } 1856 1857 static int 1858 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1859 struct g_geom *gp) 1860 { 1861 1862 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1863 g_topology_assert(); 1864 1865 g_part_wither(gp, EINVAL); 1866 return (0); 1867 } 1868 1869 static struct g_geom * 1870 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1871 { 1872 struct g_consumer *cp; 1873 struct g_geom *gp; 1874 struct g_part_entry *entry; 1875 struct g_part_table *table; 1876 struct root_hold_token *rht; 1877 int attr, depth; 1878 int error; 1879 1880 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1881 g_topology_assert(); 1882 1883 /* Skip providers that are already open for writing. */ 1884 if (pp->acw > 0) 1885 return (NULL); 1886 1887 /* 1888 * Create a GEOM with consumer and hook it up to the provider. 1889 * With that we become part of the topology. Optain read access 1890 * to the provider. 1891 */ 1892 gp = g_new_geomf(mp, "%s", pp->name); 1893 cp = g_new_consumer(gp); 1894 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1895 error = g_attach(cp, pp); 1896 if (error == 0) 1897 error = g_access(cp, 1, 0, 0); 1898 if (error != 0) { 1899 if (cp->provider) 1900 g_detach(cp); 1901 g_destroy_consumer(cp); 1902 g_destroy_geom(gp); 1903 return (NULL); 1904 } 1905 1906 rht = root_mount_hold(mp->name); 1907 g_topology_unlock(); 1908 1909 /* 1910 * Short-circuit the whole probing galore when there's no 1911 * media present. 1912 */ 1913 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1914 error = ENODEV; 1915 goto fail; 1916 } 1917 1918 /* Make sure we can nest and if so, determine our depth. */ 1919 error = g_getattr("PART::isleaf", cp, &attr); 1920 if (!error && attr) { 1921 error = ENODEV; 1922 goto fail; 1923 } 1924 error = g_getattr("PART::depth", cp, &attr); 1925 depth = (!error) ? attr + 1 : 0; 1926 1927 error = g_part_probe(gp, cp, depth); 1928 if (error) 1929 goto fail; 1930 1931 table = gp->softc; 1932 1933 /* 1934 * Synthesize a disk geometry. Some partitioning schemes 1935 * depend on it and since some file systems need it even 1936 * when the partitition scheme doesn't, we do it here in 1937 * scheme-independent code. 1938 */ 1939 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1940 1941 error = G_PART_READ(table, cp); 1942 if (error) 1943 goto fail; 1944 error = g_part_check_integrity(table, cp); 1945 if (error) 1946 goto fail; 1947 1948 g_topology_lock(); 1949 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1950 if (!entry->gpe_internal) 1951 g_part_new_provider(gp, table, entry); 1952 } 1953 1954 root_mount_rel(rht); 1955 g_access(cp, -1, 0, 0); 1956 return (gp); 1957 1958 fail: 1959 g_topology_lock(); 1960 root_mount_rel(rht); 1961 g_access(cp, -1, 0, 0); 1962 g_detach(cp); 1963 g_destroy_consumer(cp); 1964 g_destroy_geom(gp); 1965 return (NULL); 1966 } 1967 1968 /* 1969 * Geom methods. 1970 */ 1971 1972 static int 1973 g_part_access(struct g_provider *pp, int dr, int dw, int de) 1974 { 1975 struct g_consumer *cp; 1976 1977 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1978 dw, de)); 1979 1980 cp = LIST_FIRST(&pp->geom->consumer); 1981 1982 /* We always gain write-exclusive access. */ 1983 return (g_access(cp, dr, dw, dw + de)); 1984 } 1985 1986 static void 1987 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1988 struct g_consumer *cp, struct g_provider *pp) 1989 { 1990 char buf[64]; 1991 struct g_part_entry *entry; 1992 struct g_part_table *table; 1993 1994 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 1995 table = gp->softc; 1996 1997 if (indent == NULL) { 1998 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 1999 entry = pp->private; 2000 if (entry == NULL) 2001 return; 2002 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 2003 (uintmax_t)entry->gpe_offset, 2004 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2005 /* 2006 * libdisk compatibility quirk - the scheme dumps the 2007 * slicer name and partition type in a way that is 2008 * compatible with libdisk. When libdisk is not used 2009 * anymore, this should go away. 2010 */ 2011 G_PART_DUMPCONF(table, entry, sb, indent); 2012 } else if (cp != NULL) { /* Consumer configuration. */ 2013 KASSERT(pp == NULL, ("%s", __func__)); 2014 /* none */ 2015 } else if (pp != NULL) { /* Provider configuration. */ 2016 entry = pp->private; 2017 if (entry == NULL) 2018 return; 2019 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2020 (uintmax_t)entry->gpe_start); 2021 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2022 (uintmax_t)entry->gpe_end); 2023 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2024 entry->gpe_index); 2025 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2026 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2027 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2028 (uintmax_t)entry->gpe_offset); 2029 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2030 (uintmax_t)pp->mediasize); 2031 G_PART_DUMPCONF(table, entry, sb, indent); 2032 } else { /* Geom configuration. */ 2033 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2034 table->gpt_scheme->name); 2035 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2036 table->gpt_entries); 2037 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2038 (uintmax_t)table->gpt_first); 2039 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2040 (uintmax_t)table->gpt_last); 2041 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2042 table->gpt_sectors); 2043 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2044 table->gpt_heads); 2045 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2046 table->gpt_corrupt ? "CORRUPT": "OK"); 2047 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2048 table->gpt_opened ? "true": "false"); 2049 G_PART_DUMPCONF(table, NULL, sb, indent); 2050 } 2051 } 2052 2053 static void 2054 g_part_resize(struct g_consumer *cp) 2055 { 2056 struct g_part_table *table; 2057 2058 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2059 g_topology_assert(); 2060 2061 table = cp->geom->softc; 2062 if (table->gpt_opened == 0) { 2063 if (g_access(cp, 1, 1, 1) != 0) 2064 return; 2065 table->gpt_opened = 1; 2066 } 2067 if (G_PART_RESIZE(table, NULL, NULL) == 0) 2068 printf("GEOM_PART: %s was automatically resized.\n" 2069 " Use `gpart commit %s` to save changes or " 2070 "`gpart undo %s` to revert them.\n", cp->geom->name, 2071 cp->geom->name, cp->geom->name); 2072 if (g_part_check_integrity(table, cp) != 0) { 2073 g_access(cp, -1, -1, -1); 2074 table->gpt_opened = 0; 2075 g_part_wither(table->gpt_gp, ENXIO); 2076 } 2077 } 2078 2079 static void 2080 g_part_orphan(struct g_consumer *cp) 2081 { 2082 struct g_provider *pp; 2083 struct g_part_table *table; 2084 2085 pp = cp->provider; 2086 KASSERT(pp != NULL, ("%s", __func__)); 2087 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2088 g_topology_assert(); 2089 2090 KASSERT(pp->error != 0, ("%s", __func__)); 2091 table = cp->geom->softc; 2092 if (table != NULL && table->gpt_opened) 2093 g_access(cp, -1, -1, -1); 2094 g_part_wither(cp->geom, pp->error); 2095 } 2096 2097 static void 2098 g_part_spoiled(struct g_consumer *cp) 2099 { 2100 2101 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2102 g_topology_assert(); 2103 2104 cp->flags |= G_CF_ORPHAN; 2105 g_part_wither(cp->geom, ENXIO); 2106 } 2107 2108 static void 2109 g_part_start(struct bio *bp) 2110 { 2111 struct bio *bp2; 2112 struct g_consumer *cp; 2113 struct g_geom *gp; 2114 struct g_part_entry *entry; 2115 struct g_part_table *table; 2116 struct g_kerneldump *gkd; 2117 struct g_provider *pp; 2118 char buf[64]; 2119 2120 pp = bp->bio_to; 2121 gp = pp->geom; 2122 table = gp->softc; 2123 cp = LIST_FIRST(&gp->consumer); 2124 2125 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2126 pp->name)); 2127 2128 entry = pp->private; 2129 if (entry == NULL) { 2130 g_io_deliver(bp, ENXIO); 2131 return; 2132 } 2133 2134 switch(bp->bio_cmd) { 2135 case BIO_DELETE: 2136 case BIO_READ: 2137 case BIO_WRITE: 2138 if (bp->bio_offset >= pp->mediasize) { 2139 g_io_deliver(bp, EIO); 2140 return; 2141 } 2142 bp2 = g_clone_bio(bp); 2143 if (bp2 == NULL) { 2144 g_io_deliver(bp, ENOMEM); 2145 return; 2146 } 2147 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2148 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2149 bp2->bio_done = g_std_done; 2150 bp2->bio_offset += entry->gpe_offset; 2151 g_io_request(bp2, cp); 2152 return; 2153 case BIO_FLUSH: 2154 break; 2155 case BIO_GETATTR: 2156 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2157 return; 2158 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2159 return; 2160 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2161 return; 2162 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2163 return; 2164 if (g_handleattr_str(bp, "PART::scheme", 2165 table->gpt_scheme->name)) 2166 return; 2167 if (g_handleattr_str(bp, "PART::type", 2168 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2169 return; 2170 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2171 /* 2172 * Check that the partition is suitable for kernel 2173 * dumps. Typically only swap partitions should be 2174 * used. If the request comes from the nested scheme 2175 * we allow dumping there as well. 2176 */ 2177 if ((bp->bio_from == NULL || 2178 bp->bio_from->geom->class != &g_part_class) && 2179 G_PART_DUMPTO(table, entry) == 0) { 2180 g_io_deliver(bp, ENODEV); 2181 printf("GEOM_PART: Partition '%s' not suitable" 2182 " for kernel dumps (wrong type?)\n", 2183 pp->name); 2184 return; 2185 } 2186 gkd = (struct g_kerneldump *)bp->bio_data; 2187 if (gkd->offset >= pp->mediasize) { 2188 g_io_deliver(bp, EIO); 2189 return; 2190 } 2191 if (gkd->offset + gkd->length > pp->mediasize) 2192 gkd->length = pp->mediasize - gkd->offset; 2193 gkd->offset += entry->gpe_offset; 2194 } 2195 break; 2196 default: 2197 g_io_deliver(bp, EOPNOTSUPP); 2198 return; 2199 } 2200 2201 bp2 = g_clone_bio(bp); 2202 if (bp2 == NULL) { 2203 g_io_deliver(bp, ENOMEM); 2204 return; 2205 } 2206 bp2->bio_done = g_std_done; 2207 g_io_request(bp2, cp); 2208 } 2209 2210 static void 2211 g_part_init(struct g_class *mp) 2212 { 2213 2214 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2215 } 2216 2217 static void 2218 g_part_fini(struct g_class *mp) 2219 { 2220 2221 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2222 } 2223 2224 static void 2225 g_part_unload_event(void *arg, int flag) 2226 { 2227 struct g_consumer *cp; 2228 struct g_geom *gp; 2229 struct g_provider *pp; 2230 struct g_part_scheme *scheme; 2231 struct g_part_table *table; 2232 uintptr_t *xchg; 2233 int acc, error; 2234 2235 if (flag == EV_CANCEL) 2236 return; 2237 2238 xchg = arg; 2239 error = 0; 2240 scheme = (void *)(*xchg); 2241 2242 g_topology_assert(); 2243 2244 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2245 table = gp->softc; 2246 if (table->gpt_scheme != scheme) 2247 continue; 2248 2249 acc = 0; 2250 LIST_FOREACH(pp, &gp->provider, provider) 2251 acc += pp->acr + pp->acw + pp->ace; 2252 LIST_FOREACH(cp, &gp->consumer, consumer) 2253 acc += cp->acr + cp->acw + cp->ace; 2254 2255 if (!acc) 2256 g_part_wither(gp, ENOSYS); 2257 else 2258 error = EBUSY; 2259 } 2260 2261 if (!error) 2262 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2263 2264 *xchg = error; 2265 } 2266 2267 int 2268 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2269 { 2270 struct g_part_scheme *iter; 2271 uintptr_t arg; 2272 int error; 2273 2274 error = 0; 2275 switch (type) { 2276 case MOD_LOAD: 2277 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2278 if (scheme == iter) { 2279 printf("GEOM_PART: scheme %s is already " 2280 "registered!\n", scheme->name); 2281 break; 2282 } 2283 } 2284 if (iter == NULL) { 2285 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2286 scheme_list); 2287 g_retaste(&g_part_class); 2288 } 2289 break; 2290 case MOD_UNLOAD: 2291 arg = (uintptr_t)scheme; 2292 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2293 NULL); 2294 if (error == 0) 2295 error = arg; 2296 break; 2297 default: 2298 error = EOPNOTSUPP; 2299 break; 2300 } 2301 2302 return (error); 2303 } 2304