1 /*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/endian.h> 33 #include <sys/kernel.h> 34 #include <sys/kobj.h> 35 #include <sys/limits.h> 36 #include <sys/lock.h> 37 #include <sys/malloc.h> 38 #include <sys/mutex.h> 39 #include <sys/queue.h> 40 #include <sys/sbuf.h> 41 #include <sys/sysctl.h> 42 #include <sys/systm.h> 43 #include <sys/uuid.h> 44 #include <geom/geom.h> 45 #include <geom/geom_ctl.h> 46 #include <geom/geom_int.h> 47 #include <geom/part/g_part.h> 48 49 #include "g_part_if.h" 50 51 #ifndef _PATH_DEV 52 #define _PATH_DEV "/dev/" 53 #endif 54 55 static kobj_method_t g_part_null_methods[] = { 56 { 0, 0 } 57 }; 58 59 static struct g_part_scheme g_part_null_scheme = { 60 "(none)", 61 g_part_null_methods, 62 sizeof(struct g_part_table), 63 }; 64 65 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 66 TAILQ_HEAD_INITIALIZER(g_part_schemes); 67 68 struct g_part_alias_list { 69 const char *lexeme; 70 enum g_part_alias alias; 71 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 72 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 73 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 74 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 75 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 76 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 77 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 78 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 79 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 80 { "ebr", G_PART_ALIAS_EBR }, 81 { "efi", G_PART_ALIAS_EFI }, 82 { "fat16", G_PART_ALIAS_MS_FAT16 }, 83 { "fat32", G_PART_ALIAS_MS_FAT32 }, 84 { "freebsd", G_PART_ALIAS_FREEBSD }, 85 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 86 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS }, 87 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 88 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 89 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 90 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 91 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 92 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 93 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 94 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 95 { "mbr", G_PART_ALIAS_MBR }, 96 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 97 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 98 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 99 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 100 { "ntfs", G_PART_ALIAS_MS_NTFS }, 101 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 102 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 103 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 104 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 105 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 106 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 107 { "vmware-vmfs", G_PART_ALIAS_VMFS }, 108 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG }, 109 { "vmware-reserved", G_PART_ALIAS_VMRESERVED }, 110 }; 111 112 SYSCTL_DECL(_kern_geom); 113 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, 114 "GEOM_PART stuff"); 115 static u_int check_integrity = 1; 116 TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity); 117 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 118 CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1, 119 "Enable integrity checking"); 120 121 /* 122 * The GEOM partitioning class. 123 */ 124 static g_ctl_req_t g_part_ctlreq; 125 static g_ctl_destroy_geom_t g_part_destroy_geom; 126 static g_fini_t g_part_fini; 127 static g_init_t g_part_init; 128 static g_taste_t g_part_taste; 129 130 static g_access_t g_part_access; 131 static g_dumpconf_t g_part_dumpconf; 132 static g_orphan_t g_part_orphan; 133 static g_spoiled_t g_part_spoiled; 134 static g_start_t g_part_start; 135 static g_resize_t g_part_resize; 136 137 static struct g_class g_part_class = { 138 .name = "PART", 139 .version = G_VERSION, 140 /* Class methods. */ 141 .ctlreq = g_part_ctlreq, 142 .destroy_geom = g_part_destroy_geom, 143 .fini = g_part_fini, 144 .init = g_part_init, 145 .taste = g_part_taste, 146 /* Geom methods. */ 147 .access = g_part_access, 148 .dumpconf = g_part_dumpconf, 149 .orphan = g_part_orphan, 150 .spoiled = g_part_spoiled, 151 .start = g_part_start, 152 .resize = g_part_resize 153 }; 154 155 DECLARE_GEOM_CLASS(g_part_class, g_part); 156 MODULE_VERSION(g_part, 0); 157 158 /* 159 * Support functions. 160 */ 161 162 static void g_part_wither(struct g_geom *, int); 163 164 const char * 165 g_part_alias_name(enum g_part_alias alias) 166 { 167 int i; 168 169 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 170 if (g_part_alias_list[i].alias != alias) 171 continue; 172 return (g_part_alias_list[i].lexeme); 173 } 174 175 return (NULL); 176 } 177 178 void 179 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 180 u_int *bestheads) 181 { 182 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 183 off_t chs, cylinders; 184 u_int heads; 185 int idx; 186 187 *bestchs = 0; 188 *bestheads = 0; 189 for (idx = 0; candidate_heads[idx] != 0; idx++) { 190 heads = candidate_heads[idx]; 191 cylinders = blocks / heads / sectors; 192 if (cylinders < heads || cylinders < sectors) 193 break; 194 if (cylinders > 1023) 195 continue; 196 chs = cylinders * heads * sectors; 197 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 198 *bestchs = chs; 199 *bestheads = heads; 200 } 201 } 202 } 203 204 static void 205 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 206 off_t blocks) 207 { 208 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 209 off_t chs, bestchs; 210 u_int heads, sectors; 211 int idx; 212 213 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 214 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 215 table->gpt_fixgeom = 0; 216 table->gpt_heads = 0; 217 table->gpt_sectors = 0; 218 bestchs = 0; 219 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 220 sectors = candidate_sectors[idx]; 221 g_part_geometry_heads(blocks, sectors, &chs, &heads); 222 if (chs == 0) 223 continue; 224 /* 225 * Prefer a geometry with sectors > 1, but only if 226 * it doesn't bump down the number of heads to 1. 227 */ 228 if (chs > bestchs || (chs == bestchs && heads > 1 && 229 table->gpt_sectors == 1)) { 230 bestchs = chs; 231 table->gpt_heads = heads; 232 table->gpt_sectors = sectors; 233 } 234 } 235 /* 236 * If we didn't find a geometry at all, then the disk is 237 * too big. This means we can use the maximum number of 238 * heads and sectors. 239 */ 240 if (bestchs == 0) { 241 table->gpt_heads = 255; 242 table->gpt_sectors = 63; 243 } 244 } else { 245 table->gpt_fixgeom = 1; 246 table->gpt_heads = heads; 247 table->gpt_sectors = sectors; 248 } 249 } 250 251 #define DPRINTF(...) if (bootverbose) { \ 252 printf("GEOM_PART: " __VA_ARGS__); \ 253 } 254 255 static int 256 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 257 { 258 struct g_part_entry *e1, *e2; 259 struct g_provider *pp; 260 off_t offset; 261 int failed; 262 263 failed = 0; 264 pp = cp->provider; 265 if (table->gpt_last < table->gpt_first) { 266 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 267 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 268 failed++; 269 } 270 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 271 DPRINTF("last LBA extends beyond mediasize: " 272 "%jd > %jd\n", (intmax_t)table->gpt_last, 273 (intmax_t)pp->mediasize / pp->sectorsize - 1); 274 failed++; 275 } 276 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 277 if (e1->gpe_deleted || e1->gpe_internal) 278 continue; 279 if (e1->gpe_start < table->gpt_first) { 280 DPRINTF("partition %d has start offset below first " 281 "LBA: %jd < %jd\n", e1->gpe_index, 282 (intmax_t)e1->gpe_start, 283 (intmax_t)table->gpt_first); 284 failed++; 285 } 286 if (e1->gpe_start > table->gpt_last) { 287 DPRINTF("partition %d has start offset beyond last " 288 "LBA: %jd > %jd\n", e1->gpe_index, 289 (intmax_t)e1->gpe_start, 290 (intmax_t)table->gpt_last); 291 failed++; 292 } 293 if (e1->gpe_end < e1->gpe_start) { 294 DPRINTF("partition %d has end offset below start " 295 "offset: %jd < %jd\n", e1->gpe_index, 296 (intmax_t)e1->gpe_end, 297 (intmax_t)e1->gpe_start); 298 failed++; 299 } 300 if (e1->gpe_end > table->gpt_last) { 301 DPRINTF("partition %d has end offset beyond last " 302 "LBA: %jd > %jd\n", e1->gpe_index, 303 (intmax_t)e1->gpe_end, 304 (intmax_t)table->gpt_last); 305 failed++; 306 } 307 if (pp->stripesize > 0) { 308 offset = e1->gpe_start * pp->sectorsize; 309 if (e1->gpe_offset > offset) 310 offset = e1->gpe_offset; 311 if ((offset + pp->stripeoffset) % pp->stripesize) { 312 DPRINTF("partition %d is not aligned on %u " 313 "bytes\n", e1->gpe_index, pp->stripesize); 314 /* Don't treat this as a critical failure */ 315 } 316 } 317 e2 = e1; 318 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 319 if (e2->gpe_deleted || e2->gpe_internal) 320 continue; 321 if (e1->gpe_start >= e2->gpe_start && 322 e1->gpe_start <= e2->gpe_end) { 323 DPRINTF("partition %d has start offset inside " 324 "partition %d: start[%d] %jd >= start[%d] " 325 "%jd <= end[%d] %jd\n", 326 e1->gpe_index, e2->gpe_index, 327 e2->gpe_index, (intmax_t)e2->gpe_start, 328 e1->gpe_index, (intmax_t)e1->gpe_start, 329 e2->gpe_index, (intmax_t)e2->gpe_end); 330 failed++; 331 } 332 if (e1->gpe_end >= e2->gpe_start && 333 e1->gpe_end <= e2->gpe_end) { 334 DPRINTF("partition %d has end offset inside " 335 "partition %d: start[%d] %jd >= end[%d] " 336 "%jd <= end[%d] %jd\n", 337 e1->gpe_index, e2->gpe_index, 338 e2->gpe_index, (intmax_t)e2->gpe_start, 339 e1->gpe_index, (intmax_t)e1->gpe_end, 340 e2->gpe_index, (intmax_t)e2->gpe_end); 341 failed++; 342 } 343 if (e1->gpe_start < e2->gpe_start && 344 e1->gpe_end > e2->gpe_end) { 345 DPRINTF("partition %d contains partition %d: " 346 "start[%d] %jd > start[%d] %jd, end[%d] " 347 "%jd < end[%d] %jd\n", 348 e1->gpe_index, e2->gpe_index, 349 e1->gpe_index, (intmax_t)e1->gpe_start, 350 e2->gpe_index, (intmax_t)e2->gpe_start, 351 e2->gpe_index, (intmax_t)e2->gpe_end, 352 e1->gpe_index, (intmax_t)e1->gpe_end); 353 failed++; 354 } 355 } 356 } 357 if (failed != 0) { 358 printf("GEOM_PART: integrity check failed (%s, %s)\n", 359 pp->name, table->gpt_scheme->name); 360 if (check_integrity != 0) 361 return (EINVAL); 362 table->gpt_corrupt = 1; 363 } 364 return (0); 365 } 366 #undef DPRINTF 367 368 struct g_part_entry * 369 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 370 quad_t end) 371 { 372 struct g_part_entry *entry, *last; 373 374 last = NULL; 375 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 376 if (entry->gpe_index == index) 377 break; 378 if (entry->gpe_index > index) { 379 entry = NULL; 380 break; 381 } 382 last = entry; 383 } 384 if (entry == NULL) { 385 entry = g_malloc(table->gpt_scheme->gps_entrysz, 386 M_WAITOK | M_ZERO); 387 entry->gpe_index = index; 388 if (last == NULL) 389 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 390 else 391 LIST_INSERT_AFTER(last, entry, gpe_entry); 392 } else 393 entry->gpe_offset = 0; 394 entry->gpe_start = start; 395 entry->gpe_end = end; 396 return (entry); 397 } 398 399 static void 400 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 401 struct g_part_entry *entry) 402 { 403 struct g_consumer *cp; 404 struct g_provider *pp; 405 struct sbuf *sb; 406 off_t offset; 407 408 cp = LIST_FIRST(&gp->consumer); 409 pp = cp->provider; 410 411 offset = entry->gpe_start * pp->sectorsize; 412 if (entry->gpe_offset < offset) 413 entry->gpe_offset = offset; 414 415 if (entry->gpe_pp == NULL) { 416 sb = sbuf_new_auto(); 417 G_PART_FULLNAME(table, entry, sb, gp->name); 418 sbuf_finish(sb); 419 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 420 sbuf_delete(sb); 421 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 422 entry->gpe_pp->private = entry; /* Close the circle. */ 423 } 424 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 425 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 426 pp->sectorsize; 427 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 428 entry->gpe_pp->sectorsize = pp->sectorsize; 429 entry->gpe_pp->stripesize = pp->stripesize; 430 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 431 if (pp->stripesize > 0) 432 entry->gpe_pp->stripeoffset %= pp->stripesize; 433 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED; 434 g_error_provider(entry->gpe_pp, 0); 435 } 436 437 static struct g_geom* 438 g_part_find_geom(const char *name) 439 { 440 struct g_geom *gp; 441 LIST_FOREACH(gp, &g_part_class.geom, geom) { 442 if (!strcmp(name, gp->name)) 443 break; 444 } 445 return (gp); 446 } 447 448 static int 449 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 450 { 451 struct g_geom *gp; 452 const char *gname; 453 454 gname = gctl_get_asciiparam(req, name); 455 if (gname == NULL) 456 return (ENOATTR); 457 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 458 gname += sizeof(_PATH_DEV) - 1; 459 gp = g_part_find_geom(gname); 460 if (gp == NULL) { 461 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 462 return (EINVAL); 463 } 464 if ((gp->flags & G_GEOM_WITHER) != 0) { 465 gctl_error(req, "%d %s", ENXIO, gname); 466 return (ENXIO); 467 } 468 *v = gp; 469 return (0); 470 } 471 472 static int 473 g_part_parm_provider(struct gctl_req *req, const char *name, 474 struct g_provider **v) 475 { 476 struct g_provider *pp; 477 const char *pname; 478 479 pname = gctl_get_asciiparam(req, name); 480 if (pname == NULL) 481 return (ENOATTR); 482 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 483 pname += sizeof(_PATH_DEV) - 1; 484 pp = g_provider_by_name(pname); 485 if (pp == NULL) { 486 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 487 return (EINVAL); 488 } 489 *v = pp; 490 return (0); 491 } 492 493 static int 494 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 495 { 496 const char *p; 497 char *x; 498 quad_t q; 499 500 p = gctl_get_asciiparam(req, name); 501 if (p == NULL) 502 return (ENOATTR); 503 q = strtoq(p, &x, 0); 504 if (*x != '\0' || q < 0) { 505 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 506 return (EINVAL); 507 } 508 *v = q; 509 return (0); 510 } 511 512 static int 513 g_part_parm_scheme(struct gctl_req *req, const char *name, 514 struct g_part_scheme **v) 515 { 516 struct g_part_scheme *s; 517 const char *p; 518 519 p = gctl_get_asciiparam(req, name); 520 if (p == NULL) 521 return (ENOATTR); 522 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 523 if (s == &g_part_null_scheme) 524 continue; 525 if (!strcasecmp(s->name, p)) 526 break; 527 } 528 if (s == NULL) { 529 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 530 return (EINVAL); 531 } 532 *v = s; 533 return (0); 534 } 535 536 static int 537 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 538 { 539 const char *p; 540 541 p = gctl_get_asciiparam(req, name); 542 if (p == NULL) 543 return (ENOATTR); 544 /* An empty label is always valid. */ 545 if (strcmp(name, "label") != 0 && p[0] == '\0') { 546 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 547 return (EINVAL); 548 } 549 *v = p; 550 return (0); 551 } 552 553 static int 554 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 555 { 556 const intmax_t *p; 557 int size; 558 559 p = gctl_get_param(req, name, &size); 560 if (p == NULL) 561 return (ENOATTR); 562 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 563 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 564 return (EINVAL); 565 } 566 *v = (u_int)*p; 567 return (0); 568 } 569 570 static int 571 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 572 { 573 const uint32_t *p; 574 int size; 575 576 p = gctl_get_param(req, name, &size); 577 if (p == NULL) 578 return (ENOATTR); 579 if (size != sizeof(*p) || *p > INT_MAX) { 580 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 581 return (EINVAL); 582 } 583 *v = (u_int)*p; 584 return (0); 585 } 586 587 static int 588 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 589 unsigned int *s) 590 { 591 const void *p; 592 int size; 593 594 p = gctl_get_param(req, name, &size); 595 if (p == NULL) 596 return (ENOATTR); 597 *v = p; 598 *s = size; 599 return (0); 600 } 601 602 static int 603 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 604 { 605 struct g_part_scheme *iter, *scheme; 606 struct g_part_table *table; 607 int pri, probe; 608 609 table = gp->softc; 610 scheme = (table != NULL) ? table->gpt_scheme : NULL; 611 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 612 if (pri == 0) 613 goto done; 614 if (pri > 0) { /* error */ 615 scheme = NULL; 616 pri = INT_MIN; 617 } 618 619 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 620 if (iter == &g_part_null_scheme) 621 continue; 622 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 623 M_WAITOK); 624 table->gpt_gp = gp; 625 table->gpt_scheme = iter; 626 table->gpt_depth = depth; 627 probe = G_PART_PROBE(table, cp); 628 if (probe <= 0 && probe > pri) { 629 pri = probe; 630 scheme = iter; 631 if (gp->softc != NULL) 632 kobj_delete((kobj_t)gp->softc, M_GEOM); 633 gp->softc = table; 634 if (pri == 0) 635 goto done; 636 } else 637 kobj_delete((kobj_t)table, M_GEOM); 638 } 639 640 done: 641 return ((scheme == NULL) ? ENXIO : 0); 642 } 643 644 /* 645 * Control request functions. 646 */ 647 648 static int 649 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 650 { 651 struct g_geom *gp; 652 struct g_provider *pp; 653 struct g_part_entry *delent, *last, *entry; 654 struct g_part_table *table; 655 struct sbuf *sb; 656 quad_t end; 657 unsigned int index; 658 int error; 659 660 gp = gpp->gpp_geom; 661 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 662 g_topology_assert(); 663 664 pp = LIST_FIRST(&gp->consumer)->provider; 665 table = gp->softc; 666 end = gpp->gpp_start + gpp->gpp_size - 1; 667 668 if (gpp->gpp_start < table->gpt_first || 669 gpp->gpp_start > table->gpt_last) { 670 gctl_error(req, "%d start '%jd'", EINVAL, 671 (intmax_t)gpp->gpp_start); 672 return (EINVAL); 673 } 674 if (end < gpp->gpp_start || end > table->gpt_last) { 675 gctl_error(req, "%d size '%jd'", EINVAL, 676 (intmax_t)gpp->gpp_size); 677 return (EINVAL); 678 } 679 if (gpp->gpp_index > table->gpt_entries) { 680 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 681 return (EINVAL); 682 } 683 684 delent = last = NULL; 685 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 686 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 687 if (entry->gpe_deleted) { 688 if (entry->gpe_index == index) 689 delent = entry; 690 continue; 691 } 692 if (entry->gpe_index == index) 693 index = entry->gpe_index + 1; 694 if (entry->gpe_index < index) 695 last = entry; 696 if (entry->gpe_internal) 697 continue; 698 if (gpp->gpp_start >= entry->gpe_start && 699 gpp->gpp_start <= entry->gpe_end) { 700 gctl_error(req, "%d start '%jd'", ENOSPC, 701 (intmax_t)gpp->gpp_start); 702 return (ENOSPC); 703 } 704 if (end >= entry->gpe_start && end <= entry->gpe_end) { 705 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 706 return (ENOSPC); 707 } 708 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 709 gctl_error(req, "%d size '%jd'", ENOSPC, 710 (intmax_t)gpp->gpp_size); 711 return (ENOSPC); 712 } 713 } 714 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 715 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 716 return (EEXIST); 717 } 718 if (index > table->gpt_entries) { 719 gctl_error(req, "%d index '%d'", ENOSPC, index); 720 return (ENOSPC); 721 } 722 723 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 724 M_WAITOK | M_ZERO) : delent; 725 entry->gpe_index = index; 726 entry->gpe_start = gpp->gpp_start; 727 entry->gpe_end = end; 728 error = G_PART_ADD(table, entry, gpp); 729 if (error) { 730 gctl_error(req, "%d", error); 731 if (delent == NULL) 732 g_free(entry); 733 return (error); 734 } 735 if (delent == NULL) { 736 if (last == NULL) 737 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 738 else 739 LIST_INSERT_AFTER(last, entry, gpe_entry); 740 entry->gpe_created = 1; 741 } else { 742 entry->gpe_deleted = 0; 743 entry->gpe_modified = 1; 744 } 745 g_part_new_provider(gp, table, entry); 746 747 /* Provide feedback if so requested. */ 748 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 749 sb = sbuf_new_auto(); 750 G_PART_FULLNAME(table, entry, sb, gp->name); 751 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 752 sbuf_printf(sb, " added, but partition is not " 753 "aligned on %u bytes\n", pp->stripesize); 754 else 755 sbuf_cat(sb, " added\n"); 756 sbuf_finish(sb); 757 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 758 sbuf_delete(sb); 759 } 760 return (0); 761 } 762 763 static int 764 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 765 { 766 struct g_geom *gp; 767 struct g_part_table *table; 768 struct sbuf *sb; 769 int error, sz; 770 771 gp = gpp->gpp_geom; 772 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 773 g_topology_assert(); 774 775 table = gp->softc; 776 sz = table->gpt_scheme->gps_bootcodesz; 777 if (sz == 0) { 778 error = ENODEV; 779 goto fail; 780 } 781 if (gpp->gpp_codesize > sz) { 782 error = EFBIG; 783 goto fail; 784 } 785 786 error = G_PART_BOOTCODE(table, gpp); 787 if (error) 788 goto fail; 789 790 /* Provide feedback if so requested. */ 791 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 792 sb = sbuf_new_auto(); 793 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 794 sbuf_finish(sb); 795 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 796 sbuf_delete(sb); 797 } 798 return (0); 799 800 fail: 801 gctl_error(req, "%d", error); 802 return (error); 803 } 804 805 static int 806 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 807 { 808 struct g_consumer *cp; 809 struct g_geom *gp; 810 struct g_provider *pp; 811 struct g_part_entry *entry, *tmp; 812 struct g_part_table *table; 813 char *buf; 814 int error, i; 815 816 gp = gpp->gpp_geom; 817 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 818 g_topology_assert(); 819 820 table = gp->softc; 821 if (!table->gpt_opened) { 822 gctl_error(req, "%d", EPERM); 823 return (EPERM); 824 } 825 826 g_topology_unlock(); 827 828 cp = LIST_FIRST(&gp->consumer); 829 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 830 pp = cp->provider; 831 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 832 while (table->gpt_smhead != 0) { 833 i = ffs(table->gpt_smhead) - 1; 834 error = g_write_data(cp, i * pp->sectorsize, buf, 835 pp->sectorsize); 836 if (error) { 837 g_free(buf); 838 goto fail; 839 } 840 table->gpt_smhead &= ~(1 << i); 841 } 842 while (table->gpt_smtail != 0) { 843 i = ffs(table->gpt_smtail) - 1; 844 error = g_write_data(cp, pp->mediasize - (i + 1) * 845 pp->sectorsize, buf, pp->sectorsize); 846 if (error) { 847 g_free(buf); 848 goto fail; 849 } 850 table->gpt_smtail &= ~(1 << i); 851 } 852 g_free(buf); 853 } 854 855 if (table->gpt_scheme == &g_part_null_scheme) { 856 g_topology_lock(); 857 g_access(cp, -1, -1, -1); 858 g_part_wither(gp, ENXIO); 859 return (0); 860 } 861 862 error = G_PART_WRITE(table, cp); 863 if (error) 864 goto fail; 865 866 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 867 if (!entry->gpe_deleted) { 868 entry->gpe_created = 0; 869 entry->gpe_modified = 0; 870 continue; 871 } 872 LIST_REMOVE(entry, gpe_entry); 873 g_free(entry); 874 } 875 table->gpt_created = 0; 876 table->gpt_opened = 0; 877 878 g_topology_lock(); 879 g_access(cp, -1, -1, -1); 880 return (0); 881 882 fail: 883 g_topology_lock(); 884 gctl_error(req, "%d", error); 885 return (error); 886 } 887 888 static int 889 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 890 { 891 struct g_consumer *cp; 892 struct g_geom *gp; 893 struct g_provider *pp; 894 struct g_part_scheme *scheme; 895 struct g_part_table *null, *table; 896 struct sbuf *sb; 897 int attr, error; 898 899 pp = gpp->gpp_provider; 900 scheme = gpp->gpp_scheme; 901 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 902 g_topology_assert(); 903 904 /* Check that there isn't already a g_part geom on the provider. */ 905 gp = g_part_find_geom(pp->name); 906 if (gp != NULL) { 907 null = gp->softc; 908 if (null->gpt_scheme != &g_part_null_scheme) { 909 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 910 return (EEXIST); 911 } 912 } else 913 null = NULL; 914 915 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 916 (gpp->gpp_entries < scheme->gps_minent || 917 gpp->gpp_entries > scheme->gps_maxent)) { 918 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 919 return (EINVAL); 920 } 921 922 if (null == NULL) 923 gp = g_new_geomf(&g_part_class, "%s", pp->name); 924 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 925 M_WAITOK); 926 table = gp->softc; 927 table->gpt_gp = gp; 928 table->gpt_scheme = gpp->gpp_scheme; 929 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 930 gpp->gpp_entries : scheme->gps_minent; 931 LIST_INIT(&table->gpt_entry); 932 if (null == NULL) { 933 cp = g_new_consumer(gp); 934 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 935 error = g_attach(cp, pp); 936 if (error == 0) 937 error = g_access(cp, 1, 1, 1); 938 if (error != 0) { 939 g_part_wither(gp, error); 940 gctl_error(req, "%d geom '%s'", error, pp->name); 941 return (error); 942 } 943 table->gpt_opened = 1; 944 } else { 945 cp = LIST_FIRST(&gp->consumer); 946 table->gpt_opened = null->gpt_opened; 947 table->gpt_smhead = null->gpt_smhead; 948 table->gpt_smtail = null->gpt_smtail; 949 } 950 951 g_topology_unlock(); 952 953 /* Make sure the provider has media. */ 954 if (pp->mediasize == 0 || pp->sectorsize == 0) { 955 error = ENODEV; 956 goto fail; 957 } 958 959 /* Make sure we can nest and if so, determine our depth. */ 960 error = g_getattr("PART::isleaf", cp, &attr); 961 if (!error && attr) { 962 error = ENODEV; 963 goto fail; 964 } 965 error = g_getattr("PART::depth", cp, &attr); 966 table->gpt_depth = (!error) ? attr + 1 : 0; 967 968 /* 969 * Synthesize a disk geometry. Some partitioning schemes 970 * depend on it and since some file systems need it even 971 * when the partitition scheme doesn't, we do it here in 972 * scheme-independent code. 973 */ 974 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 975 976 error = G_PART_CREATE(table, gpp); 977 if (error) 978 goto fail; 979 980 g_topology_lock(); 981 982 table->gpt_created = 1; 983 if (null != NULL) 984 kobj_delete((kobj_t)null, M_GEOM); 985 986 /* 987 * Support automatic commit by filling in the gpp_geom 988 * parameter. 989 */ 990 gpp->gpp_parms |= G_PART_PARM_GEOM; 991 gpp->gpp_geom = gp; 992 993 /* Provide feedback if so requested. */ 994 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 995 sb = sbuf_new_auto(); 996 sbuf_printf(sb, "%s created\n", gp->name); 997 sbuf_finish(sb); 998 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 999 sbuf_delete(sb); 1000 } 1001 return (0); 1002 1003 fail: 1004 g_topology_lock(); 1005 if (null == NULL) { 1006 g_access(cp, -1, -1, -1); 1007 g_part_wither(gp, error); 1008 } else { 1009 kobj_delete((kobj_t)gp->softc, M_GEOM); 1010 gp->softc = null; 1011 } 1012 gctl_error(req, "%d provider", error); 1013 return (error); 1014 } 1015 1016 static int 1017 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1018 { 1019 struct g_geom *gp; 1020 struct g_provider *pp; 1021 struct g_part_entry *entry; 1022 struct g_part_table *table; 1023 struct sbuf *sb; 1024 1025 gp = gpp->gpp_geom; 1026 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1027 g_topology_assert(); 1028 1029 table = gp->softc; 1030 1031 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1032 if (entry->gpe_deleted || entry->gpe_internal) 1033 continue; 1034 if (entry->gpe_index == gpp->gpp_index) 1035 break; 1036 } 1037 if (entry == NULL) { 1038 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1039 return (ENOENT); 1040 } 1041 1042 pp = entry->gpe_pp; 1043 if (pp != NULL) { 1044 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1045 gctl_error(req, "%d", EBUSY); 1046 return (EBUSY); 1047 } 1048 1049 pp->private = NULL; 1050 entry->gpe_pp = NULL; 1051 } 1052 1053 if (pp != NULL) 1054 g_wither_provider(pp, ENXIO); 1055 1056 /* Provide feedback if so requested. */ 1057 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1058 sb = sbuf_new_auto(); 1059 G_PART_FULLNAME(table, entry, sb, gp->name); 1060 sbuf_cat(sb, " deleted\n"); 1061 sbuf_finish(sb); 1062 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1063 sbuf_delete(sb); 1064 } 1065 1066 if (entry->gpe_created) { 1067 LIST_REMOVE(entry, gpe_entry); 1068 g_free(entry); 1069 } else { 1070 entry->gpe_modified = 0; 1071 entry->gpe_deleted = 1; 1072 } 1073 return (0); 1074 } 1075 1076 static int 1077 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1078 { 1079 struct g_consumer *cp; 1080 struct g_geom *gp; 1081 struct g_provider *pp; 1082 struct g_part_entry *entry, *tmp; 1083 struct g_part_table *null, *table; 1084 struct sbuf *sb; 1085 int error; 1086 1087 gp = gpp->gpp_geom; 1088 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1089 g_topology_assert(); 1090 1091 table = gp->softc; 1092 /* Check for busy providers. */ 1093 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1094 if (entry->gpe_deleted || entry->gpe_internal) 1095 continue; 1096 if (gpp->gpp_force) { 1097 pp = entry->gpe_pp; 1098 if (pp == NULL) 1099 continue; 1100 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1101 continue; 1102 } 1103 gctl_error(req, "%d", EBUSY); 1104 return (EBUSY); 1105 } 1106 1107 if (gpp->gpp_force) { 1108 /* Destroy all providers. */ 1109 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1110 pp = entry->gpe_pp; 1111 if (pp != NULL) { 1112 pp->private = NULL; 1113 g_wither_provider(pp, ENXIO); 1114 } 1115 LIST_REMOVE(entry, gpe_entry); 1116 g_free(entry); 1117 } 1118 } 1119 1120 error = G_PART_DESTROY(table, gpp); 1121 if (error) { 1122 gctl_error(req, "%d", error); 1123 return (error); 1124 } 1125 1126 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1127 M_WAITOK); 1128 null = gp->softc; 1129 null->gpt_gp = gp; 1130 null->gpt_scheme = &g_part_null_scheme; 1131 LIST_INIT(&null->gpt_entry); 1132 1133 cp = LIST_FIRST(&gp->consumer); 1134 pp = cp->provider; 1135 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1136 1137 null->gpt_depth = table->gpt_depth; 1138 null->gpt_opened = table->gpt_opened; 1139 null->gpt_smhead = table->gpt_smhead; 1140 null->gpt_smtail = table->gpt_smtail; 1141 1142 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1143 LIST_REMOVE(entry, gpe_entry); 1144 g_free(entry); 1145 } 1146 kobj_delete((kobj_t)table, M_GEOM); 1147 1148 /* Provide feedback if so requested. */ 1149 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1150 sb = sbuf_new_auto(); 1151 sbuf_printf(sb, "%s destroyed\n", gp->name); 1152 sbuf_finish(sb); 1153 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1154 sbuf_delete(sb); 1155 } 1156 return (0); 1157 } 1158 1159 static int 1160 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1161 { 1162 struct g_geom *gp; 1163 struct g_part_entry *entry; 1164 struct g_part_table *table; 1165 struct sbuf *sb; 1166 int error; 1167 1168 gp = gpp->gpp_geom; 1169 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1170 g_topology_assert(); 1171 1172 table = gp->softc; 1173 1174 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1175 if (entry->gpe_deleted || entry->gpe_internal) 1176 continue; 1177 if (entry->gpe_index == gpp->gpp_index) 1178 break; 1179 } 1180 if (entry == NULL) { 1181 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1182 return (ENOENT); 1183 } 1184 1185 error = G_PART_MODIFY(table, entry, gpp); 1186 if (error) { 1187 gctl_error(req, "%d", error); 1188 return (error); 1189 } 1190 1191 if (!entry->gpe_created) 1192 entry->gpe_modified = 1; 1193 1194 /* Provide feedback if so requested. */ 1195 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1196 sb = sbuf_new_auto(); 1197 G_PART_FULLNAME(table, entry, sb, gp->name); 1198 sbuf_cat(sb, " modified\n"); 1199 sbuf_finish(sb); 1200 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1201 sbuf_delete(sb); 1202 } 1203 return (0); 1204 } 1205 1206 static int 1207 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1208 { 1209 gctl_error(req, "%d verb 'move'", ENOSYS); 1210 return (ENOSYS); 1211 } 1212 1213 static int 1214 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1215 { 1216 struct g_part_table *table; 1217 struct g_geom *gp; 1218 struct sbuf *sb; 1219 int error, recovered; 1220 1221 gp = gpp->gpp_geom; 1222 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1223 g_topology_assert(); 1224 table = gp->softc; 1225 error = recovered = 0; 1226 1227 if (table->gpt_corrupt) { 1228 error = G_PART_RECOVER(table); 1229 if (error == 0) 1230 error = g_part_check_integrity(table, 1231 LIST_FIRST(&gp->consumer)); 1232 if (error) { 1233 gctl_error(req, "%d recovering '%s' failed", 1234 error, gp->name); 1235 return (error); 1236 } 1237 recovered = 1; 1238 } 1239 /* Provide feedback if so requested. */ 1240 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1241 sb = sbuf_new_auto(); 1242 if (recovered) 1243 sbuf_printf(sb, "%s recovered\n", gp->name); 1244 else 1245 sbuf_printf(sb, "%s recovering is not needed\n", 1246 gp->name); 1247 sbuf_finish(sb); 1248 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1249 sbuf_delete(sb); 1250 } 1251 return (0); 1252 } 1253 1254 static int 1255 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1256 { 1257 struct g_geom *gp; 1258 struct g_provider *pp; 1259 struct g_part_entry *pe, *entry; 1260 struct g_part_table *table; 1261 struct sbuf *sb; 1262 quad_t end; 1263 int error; 1264 off_t mediasize; 1265 1266 gp = gpp->gpp_geom; 1267 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1268 g_topology_assert(); 1269 table = gp->softc; 1270 1271 /* check gpp_index */ 1272 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1273 if (entry->gpe_deleted || entry->gpe_internal) 1274 continue; 1275 if (entry->gpe_index == gpp->gpp_index) 1276 break; 1277 } 1278 if (entry == NULL) { 1279 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1280 return (ENOENT); 1281 } 1282 1283 /* check gpp_size */ 1284 end = entry->gpe_start + gpp->gpp_size - 1; 1285 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1286 gctl_error(req, "%d size '%jd'", EINVAL, 1287 (intmax_t)gpp->gpp_size); 1288 return (EINVAL); 1289 } 1290 1291 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1292 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1293 continue; 1294 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1295 gctl_error(req, "%d end '%jd'", ENOSPC, 1296 (intmax_t)end); 1297 return (ENOSPC); 1298 } 1299 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1300 gctl_error(req, "%d size '%jd'", ENOSPC, 1301 (intmax_t)gpp->gpp_size); 1302 return (ENOSPC); 1303 } 1304 } 1305 1306 pp = entry->gpe_pp; 1307 if ((g_debugflags & 16) == 0 && 1308 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1309 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) { 1310 /* Deny shrinking of an opened partition. */ 1311 gctl_error(req, "%d", EBUSY); 1312 return (EBUSY); 1313 } 1314 } 1315 1316 error = G_PART_RESIZE(table, entry, gpp); 1317 if (error) { 1318 gctl_error(req, "%d", error); 1319 return (error); 1320 } 1321 1322 if (!entry->gpe_created) 1323 entry->gpe_modified = 1; 1324 1325 /* update mediasize of changed provider */ 1326 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1327 pp->sectorsize; 1328 g_resize_provider(pp, mediasize); 1329 1330 /* Provide feedback if so requested. */ 1331 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1332 sb = sbuf_new_auto(); 1333 G_PART_FULLNAME(table, entry, sb, gp->name); 1334 sbuf_cat(sb, " resized\n"); 1335 sbuf_finish(sb); 1336 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1337 sbuf_delete(sb); 1338 } 1339 return (0); 1340 } 1341 1342 static int 1343 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1344 unsigned int set) 1345 { 1346 struct g_geom *gp; 1347 struct g_part_entry *entry; 1348 struct g_part_table *table; 1349 struct sbuf *sb; 1350 int error; 1351 1352 gp = gpp->gpp_geom; 1353 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1354 g_topology_assert(); 1355 1356 table = gp->softc; 1357 1358 if (gpp->gpp_parms & G_PART_PARM_INDEX) { 1359 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1360 if (entry->gpe_deleted || entry->gpe_internal) 1361 continue; 1362 if (entry->gpe_index == gpp->gpp_index) 1363 break; 1364 } 1365 if (entry == NULL) { 1366 gctl_error(req, "%d index '%d'", ENOENT, 1367 gpp->gpp_index); 1368 return (ENOENT); 1369 } 1370 } else 1371 entry = NULL; 1372 1373 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1374 if (error) { 1375 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1376 return (error); 1377 } 1378 1379 /* Provide feedback if so requested. */ 1380 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1381 sb = sbuf_new_auto(); 1382 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1383 (set) ? "" : "un"); 1384 if (entry) 1385 G_PART_FULLNAME(table, entry, sb, gp->name); 1386 else 1387 sbuf_cat(sb, gp->name); 1388 sbuf_cat(sb, "\n"); 1389 sbuf_finish(sb); 1390 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1391 sbuf_delete(sb); 1392 } 1393 return (0); 1394 } 1395 1396 static int 1397 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1398 { 1399 struct g_consumer *cp; 1400 struct g_provider *pp; 1401 struct g_geom *gp; 1402 struct g_part_entry *entry, *tmp; 1403 struct g_part_table *table; 1404 int error, reprobe; 1405 1406 gp = gpp->gpp_geom; 1407 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1408 g_topology_assert(); 1409 1410 table = gp->softc; 1411 if (!table->gpt_opened) { 1412 gctl_error(req, "%d", EPERM); 1413 return (EPERM); 1414 } 1415 1416 cp = LIST_FIRST(&gp->consumer); 1417 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1418 entry->gpe_modified = 0; 1419 if (entry->gpe_created) { 1420 pp = entry->gpe_pp; 1421 if (pp != NULL) { 1422 pp->private = NULL; 1423 entry->gpe_pp = NULL; 1424 g_wither_provider(pp, ENXIO); 1425 } 1426 entry->gpe_deleted = 1; 1427 } 1428 if (entry->gpe_deleted) { 1429 LIST_REMOVE(entry, gpe_entry); 1430 g_free(entry); 1431 } 1432 } 1433 1434 g_topology_unlock(); 1435 1436 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1437 table->gpt_created) ? 1 : 0; 1438 1439 if (reprobe) { 1440 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1441 if (entry->gpe_internal) 1442 continue; 1443 error = EBUSY; 1444 goto fail; 1445 } 1446 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1447 LIST_REMOVE(entry, gpe_entry); 1448 g_free(entry); 1449 } 1450 error = g_part_probe(gp, cp, table->gpt_depth); 1451 if (error) { 1452 g_topology_lock(); 1453 g_access(cp, -1, -1, -1); 1454 g_part_wither(gp, error); 1455 return (0); 1456 } 1457 table = gp->softc; 1458 1459 /* 1460 * Synthesize a disk geometry. Some partitioning schemes 1461 * depend on it and since some file systems need it even 1462 * when the partitition scheme doesn't, we do it here in 1463 * scheme-independent code. 1464 */ 1465 pp = cp->provider; 1466 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1467 } 1468 1469 error = G_PART_READ(table, cp); 1470 if (error) 1471 goto fail; 1472 error = g_part_check_integrity(table, cp); 1473 if (error) 1474 goto fail; 1475 1476 g_topology_lock(); 1477 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1478 if (!entry->gpe_internal) 1479 g_part_new_provider(gp, table, entry); 1480 } 1481 1482 table->gpt_opened = 0; 1483 g_access(cp, -1, -1, -1); 1484 return (0); 1485 1486 fail: 1487 g_topology_lock(); 1488 gctl_error(req, "%d", error); 1489 return (error); 1490 } 1491 1492 static void 1493 g_part_wither(struct g_geom *gp, int error) 1494 { 1495 struct g_part_entry *entry; 1496 struct g_part_table *table; 1497 1498 table = gp->softc; 1499 if (table != NULL) { 1500 G_PART_DESTROY(table, NULL); 1501 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1502 LIST_REMOVE(entry, gpe_entry); 1503 g_free(entry); 1504 } 1505 if (gp->softc != NULL) { 1506 kobj_delete((kobj_t)gp->softc, M_GEOM); 1507 gp->softc = NULL; 1508 } 1509 } 1510 g_wither_geom(gp, error); 1511 } 1512 1513 /* 1514 * Class methods. 1515 */ 1516 1517 static void 1518 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1519 { 1520 struct g_part_parms gpp; 1521 struct g_part_table *table; 1522 struct gctl_req_arg *ap; 1523 enum g_part_ctl ctlreq; 1524 unsigned int i, mparms, oparms, parm; 1525 int auto_commit, close_on_error; 1526 int error, modifies; 1527 1528 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1529 g_topology_assert(); 1530 1531 ctlreq = G_PART_CTL_NONE; 1532 modifies = 1; 1533 mparms = 0; 1534 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1535 switch (*verb) { 1536 case 'a': 1537 if (!strcmp(verb, "add")) { 1538 ctlreq = G_PART_CTL_ADD; 1539 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1540 G_PART_PARM_START | G_PART_PARM_TYPE; 1541 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1542 } 1543 break; 1544 case 'b': 1545 if (!strcmp(verb, "bootcode")) { 1546 ctlreq = G_PART_CTL_BOOTCODE; 1547 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1548 } 1549 break; 1550 case 'c': 1551 if (!strcmp(verb, "commit")) { 1552 ctlreq = G_PART_CTL_COMMIT; 1553 mparms |= G_PART_PARM_GEOM; 1554 modifies = 0; 1555 } else if (!strcmp(verb, "create")) { 1556 ctlreq = G_PART_CTL_CREATE; 1557 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1558 oparms |= G_PART_PARM_ENTRIES; 1559 } 1560 break; 1561 case 'd': 1562 if (!strcmp(verb, "delete")) { 1563 ctlreq = G_PART_CTL_DELETE; 1564 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1565 } else if (!strcmp(verb, "destroy")) { 1566 ctlreq = G_PART_CTL_DESTROY; 1567 mparms |= G_PART_PARM_GEOM; 1568 oparms |= G_PART_PARM_FORCE; 1569 } 1570 break; 1571 case 'm': 1572 if (!strcmp(verb, "modify")) { 1573 ctlreq = G_PART_CTL_MODIFY; 1574 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1575 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1576 } else if (!strcmp(verb, "move")) { 1577 ctlreq = G_PART_CTL_MOVE; 1578 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1579 } 1580 break; 1581 case 'r': 1582 if (!strcmp(verb, "recover")) { 1583 ctlreq = G_PART_CTL_RECOVER; 1584 mparms |= G_PART_PARM_GEOM; 1585 } else if (!strcmp(verb, "resize")) { 1586 ctlreq = G_PART_CTL_RESIZE; 1587 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1588 G_PART_PARM_SIZE; 1589 } 1590 break; 1591 case 's': 1592 if (!strcmp(verb, "set")) { 1593 ctlreq = G_PART_CTL_SET; 1594 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1595 oparms |= G_PART_PARM_INDEX; 1596 } 1597 break; 1598 case 'u': 1599 if (!strcmp(verb, "undo")) { 1600 ctlreq = G_PART_CTL_UNDO; 1601 mparms |= G_PART_PARM_GEOM; 1602 modifies = 0; 1603 } else if (!strcmp(verb, "unset")) { 1604 ctlreq = G_PART_CTL_UNSET; 1605 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1606 oparms |= G_PART_PARM_INDEX; 1607 } 1608 break; 1609 } 1610 if (ctlreq == G_PART_CTL_NONE) { 1611 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1612 return; 1613 } 1614 1615 bzero(&gpp, sizeof(gpp)); 1616 for (i = 0; i < req->narg; i++) { 1617 ap = &req->arg[i]; 1618 parm = 0; 1619 switch (ap->name[0]) { 1620 case 'a': 1621 if (!strcmp(ap->name, "arg0")) { 1622 parm = mparms & 1623 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1624 } 1625 if (!strcmp(ap->name, "attrib")) 1626 parm = G_PART_PARM_ATTRIB; 1627 break; 1628 case 'b': 1629 if (!strcmp(ap->name, "bootcode")) 1630 parm = G_PART_PARM_BOOTCODE; 1631 break; 1632 case 'c': 1633 if (!strcmp(ap->name, "class")) 1634 continue; 1635 break; 1636 case 'e': 1637 if (!strcmp(ap->name, "entries")) 1638 parm = G_PART_PARM_ENTRIES; 1639 break; 1640 case 'f': 1641 if (!strcmp(ap->name, "flags")) 1642 parm = G_PART_PARM_FLAGS; 1643 else if (!strcmp(ap->name, "force")) 1644 parm = G_PART_PARM_FORCE; 1645 break; 1646 case 'i': 1647 if (!strcmp(ap->name, "index")) 1648 parm = G_PART_PARM_INDEX; 1649 break; 1650 case 'l': 1651 if (!strcmp(ap->name, "label")) 1652 parm = G_PART_PARM_LABEL; 1653 break; 1654 case 'o': 1655 if (!strcmp(ap->name, "output")) 1656 parm = G_PART_PARM_OUTPUT; 1657 break; 1658 case 's': 1659 if (!strcmp(ap->name, "scheme")) 1660 parm = G_PART_PARM_SCHEME; 1661 else if (!strcmp(ap->name, "size")) 1662 parm = G_PART_PARM_SIZE; 1663 else if (!strcmp(ap->name, "start")) 1664 parm = G_PART_PARM_START; 1665 break; 1666 case 't': 1667 if (!strcmp(ap->name, "type")) 1668 parm = G_PART_PARM_TYPE; 1669 break; 1670 case 'v': 1671 if (!strcmp(ap->name, "verb")) 1672 continue; 1673 else if (!strcmp(ap->name, "version")) 1674 parm = G_PART_PARM_VERSION; 1675 break; 1676 } 1677 if ((parm & (mparms | oparms)) == 0) { 1678 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1679 return; 1680 } 1681 switch (parm) { 1682 case G_PART_PARM_ATTRIB: 1683 error = g_part_parm_str(req, ap->name, 1684 &gpp.gpp_attrib); 1685 break; 1686 case G_PART_PARM_BOOTCODE: 1687 error = g_part_parm_bootcode(req, ap->name, 1688 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1689 break; 1690 case G_PART_PARM_ENTRIES: 1691 error = g_part_parm_intmax(req, ap->name, 1692 &gpp.gpp_entries); 1693 break; 1694 case G_PART_PARM_FLAGS: 1695 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1696 break; 1697 case G_PART_PARM_FORCE: 1698 error = g_part_parm_uint32(req, ap->name, 1699 &gpp.gpp_force); 1700 break; 1701 case G_PART_PARM_GEOM: 1702 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1703 break; 1704 case G_PART_PARM_INDEX: 1705 error = g_part_parm_intmax(req, ap->name, 1706 &gpp.gpp_index); 1707 break; 1708 case G_PART_PARM_LABEL: 1709 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1710 break; 1711 case G_PART_PARM_OUTPUT: 1712 error = 0; /* Write-only parameter */ 1713 break; 1714 case G_PART_PARM_PROVIDER: 1715 error = g_part_parm_provider(req, ap->name, 1716 &gpp.gpp_provider); 1717 break; 1718 case G_PART_PARM_SCHEME: 1719 error = g_part_parm_scheme(req, ap->name, 1720 &gpp.gpp_scheme); 1721 break; 1722 case G_PART_PARM_SIZE: 1723 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1724 break; 1725 case G_PART_PARM_START: 1726 error = g_part_parm_quad(req, ap->name, 1727 &gpp.gpp_start); 1728 break; 1729 case G_PART_PARM_TYPE: 1730 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1731 break; 1732 case G_PART_PARM_VERSION: 1733 error = g_part_parm_uint32(req, ap->name, 1734 &gpp.gpp_version); 1735 break; 1736 default: 1737 error = EDOOFUS; 1738 gctl_error(req, "%d %s", error, ap->name); 1739 break; 1740 } 1741 if (error != 0) { 1742 if (error == ENOATTR) { 1743 gctl_error(req, "%d param '%s'", error, 1744 ap->name); 1745 } 1746 return; 1747 } 1748 gpp.gpp_parms |= parm; 1749 } 1750 if ((gpp.gpp_parms & mparms) != mparms) { 1751 parm = mparms - (gpp.gpp_parms & mparms); 1752 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1753 return; 1754 } 1755 1756 /* Obtain permissions if possible/necessary. */ 1757 close_on_error = 0; 1758 table = NULL; 1759 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1760 table = gpp.gpp_geom->softc; 1761 if (table != NULL && table->gpt_corrupt && 1762 ctlreq != G_PART_CTL_DESTROY && 1763 ctlreq != G_PART_CTL_RECOVER) { 1764 gctl_error(req, "%d table '%s' is corrupt", 1765 EPERM, gpp.gpp_geom->name); 1766 return; 1767 } 1768 if (table != NULL && !table->gpt_opened) { 1769 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1770 1, 1, 1); 1771 if (error) { 1772 gctl_error(req, "%d geom '%s'", error, 1773 gpp.gpp_geom->name); 1774 return; 1775 } 1776 table->gpt_opened = 1; 1777 close_on_error = 1; 1778 } 1779 } 1780 1781 /* Allow the scheme to check or modify the parameters. */ 1782 if (table != NULL) { 1783 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1784 if (error) { 1785 gctl_error(req, "%d pre-check failed", error); 1786 goto out; 1787 } 1788 } else 1789 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1790 1791 switch (ctlreq) { 1792 case G_PART_CTL_NONE: 1793 panic("%s", __func__); 1794 case G_PART_CTL_ADD: 1795 error = g_part_ctl_add(req, &gpp); 1796 break; 1797 case G_PART_CTL_BOOTCODE: 1798 error = g_part_ctl_bootcode(req, &gpp); 1799 break; 1800 case G_PART_CTL_COMMIT: 1801 error = g_part_ctl_commit(req, &gpp); 1802 break; 1803 case G_PART_CTL_CREATE: 1804 error = g_part_ctl_create(req, &gpp); 1805 break; 1806 case G_PART_CTL_DELETE: 1807 error = g_part_ctl_delete(req, &gpp); 1808 break; 1809 case G_PART_CTL_DESTROY: 1810 error = g_part_ctl_destroy(req, &gpp); 1811 break; 1812 case G_PART_CTL_MODIFY: 1813 error = g_part_ctl_modify(req, &gpp); 1814 break; 1815 case G_PART_CTL_MOVE: 1816 error = g_part_ctl_move(req, &gpp); 1817 break; 1818 case G_PART_CTL_RECOVER: 1819 error = g_part_ctl_recover(req, &gpp); 1820 break; 1821 case G_PART_CTL_RESIZE: 1822 error = g_part_ctl_resize(req, &gpp); 1823 break; 1824 case G_PART_CTL_SET: 1825 error = g_part_ctl_setunset(req, &gpp, 1); 1826 break; 1827 case G_PART_CTL_UNDO: 1828 error = g_part_ctl_undo(req, &gpp); 1829 break; 1830 case G_PART_CTL_UNSET: 1831 error = g_part_ctl_setunset(req, &gpp, 0); 1832 break; 1833 } 1834 1835 /* Implement automatic commit. */ 1836 if (!error) { 1837 auto_commit = (modifies && 1838 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1839 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1840 if (auto_commit) { 1841 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1842 __func__)); 1843 error = g_part_ctl_commit(req, &gpp); 1844 } 1845 } 1846 1847 out: 1848 if (error && close_on_error) { 1849 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1850 table->gpt_opened = 0; 1851 } 1852 } 1853 1854 static int 1855 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1856 struct g_geom *gp) 1857 { 1858 1859 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1860 g_topology_assert(); 1861 1862 g_part_wither(gp, EINVAL); 1863 return (0); 1864 } 1865 1866 static struct g_geom * 1867 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1868 { 1869 struct g_consumer *cp; 1870 struct g_geom *gp; 1871 struct g_part_entry *entry; 1872 struct g_part_table *table; 1873 struct root_hold_token *rht; 1874 int attr, depth; 1875 int error; 1876 1877 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1878 g_topology_assert(); 1879 1880 /* Skip providers that are already open for writing. */ 1881 if (pp->acw > 0) 1882 return (NULL); 1883 1884 /* 1885 * Create a GEOM with consumer and hook it up to the provider. 1886 * With that we become part of the topology. Optain read access 1887 * to the provider. 1888 */ 1889 gp = g_new_geomf(mp, "%s", pp->name); 1890 cp = g_new_consumer(gp); 1891 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1892 error = g_attach(cp, pp); 1893 if (error == 0) 1894 error = g_access(cp, 1, 0, 0); 1895 if (error != 0) { 1896 if (cp->provider) 1897 g_detach(cp); 1898 g_destroy_consumer(cp); 1899 g_destroy_geom(gp); 1900 return (NULL); 1901 } 1902 1903 rht = root_mount_hold(mp->name); 1904 g_topology_unlock(); 1905 1906 /* 1907 * Short-circuit the whole probing galore when there's no 1908 * media present. 1909 */ 1910 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1911 error = ENODEV; 1912 goto fail; 1913 } 1914 1915 /* Make sure we can nest and if so, determine our depth. */ 1916 error = g_getattr("PART::isleaf", cp, &attr); 1917 if (!error && attr) { 1918 error = ENODEV; 1919 goto fail; 1920 } 1921 error = g_getattr("PART::depth", cp, &attr); 1922 depth = (!error) ? attr + 1 : 0; 1923 1924 error = g_part_probe(gp, cp, depth); 1925 if (error) 1926 goto fail; 1927 1928 table = gp->softc; 1929 1930 /* 1931 * Synthesize a disk geometry. Some partitioning schemes 1932 * depend on it and since some file systems need it even 1933 * when the partitition scheme doesn't, we do it here in 1934 * scheme-independent code. 1935 */ 1936 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1937 1938 error = G_PART_READ(table, cp); 1939 if (error) 1940 goto fail; 1941 error = g_part_check_integrity(table, cp); 1942 if (error) 1943 goto fail; 1944 1945 g_topology_lock(); 1946 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1947 if (!entry->gpe_internal) 1948 g_part_new_provider(gp, table, entry); 1949 } 1950 1951 root_mount_rel(rht); 1952 g_access(cp, -1, 0, 0); 1953 return (gp); 1954 1955 fail: 1956 g_topology_lock(); 1957 root_mount_rel(rht); 1958 g_access(cp, -1, 0, 0); 1959 g_detach(cp); 1960 g_destroy_consumer(cp); 1961 g_destroy_geom(gp); 1962 return (NULL); 1963 } 1964 1965 /* 1966 * Geom methods. 1967 */ 1968 1969 static int 1970 g_part_access(struct g_provider *pp, int dr, int dw, int de) 1971 { 1972 struct g_consumer *cp; 1973 1974 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1975 dw, de)); 1976 1977 cp = LIST_FIRST(&pp->geom->consumer); 1978 1979 /* We always gain write-exclusive access. */ 1980 return (g_access(cp, dr, dw, dw + de)); 1981 } 1982 1983 static void 1984 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1985 struct g_consumer *cp, struct g_provider *pp) 1986 { 1987 char buf[64]; 1988 struct g_part_entry *entry; 1989 struct g_part_table *table; 1990 1991 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 1992 table = gp->softc; 1993 1994 if (indent == NULL) { 1995 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 1996 entry = pp->private; 1997 if (entry == NULL) 1998 return; 1999 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 2000 (uintmax_t)entry->gpe_offset, 2001 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2002 /* 2003 * libdisk compatibility quirk - the scheme dumps the 2004 * slicer name and partition type in a way that is 2005 * compatible with libdisk. When libdisk is not used 2006 * anymore, this should go away. 2007 */ 2008 G_PART_DUMPCONF(table, entry, sb, indent); 2009 } else if (cp != NULL) { /* Consumer configuration. */ 2010 KASSERT(pp == NULL, ("%s", __func__)); 2011 /* none */ 2012 } else if (pp != NULL) { /* Provider configuration. */ 2013 entry = pp->private; 2014 if (entry == NULL) 2015 return; 2016 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2017 (uintmax_t)entry->gpe_start); 2018 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2019 (uintmax_t)entry->gpe_end); 2020 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2021 entry->gpe_index); 2022 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2023 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2024 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2025 (uintmax_t)entry->gpe_offset); 2026 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2027 (uintmax_t)pp->mediasize); 2028 G_PART_DUMPCONF(table, entry, sb, indent); 2029 } else { /* Geom configuration. */ 2030 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2031 table->gpt_scheme->name); 2032 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2033 table->gpt_entries); 2034 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2035 (uintmax_t)table->gpt_first); 2036 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2037 (uintmax_t)table->gpt_last); 2038 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2039 table->gpt_sectors); 2040 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2041 table->gpt_heads); 2042 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2043 table->gpt_corrupt ? "CORRUPT": "OK"); 2044 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2045 table->gpt_opened ? "true": "false"); 2046 G_PART_DUMPCONF(table, NULL, sb, indent); 2047 } 2048 } 2049 2050 static void 2051 g_part_resize(struct g_consumer *cp) 2052 { 2053 struct g_part_table *table; 2054 2055 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2056 g_topology_assert(); 2057 2058 table = cp->geom->softc; 2059 if (table->gpt_opened == 0) { 2060 if (g_access(cp, 1, 1, 1) != 0) 2061 return; 2062 table->gpt_opened = 1; 2063 } 2064 if (G_PART_RESIZE(table, NULL, NULL) == 0) 2065 printf("GEOM_PART: %s was automatically resized\n", 2066 cp->geom->name); 2067 if (g_part_check_integrity(table, cp) != 0) { 2068 g_access(cp, -1, -1, -1); 2069 table->gpt_opened = 0; 2070 g_part_wither(table->gpt_gp, ENXIO); 2071 } 2072 } 2073 2074 static void 2075 g_part_orphan(struct g_consumer *cp) 2076 { 2077 struct g_provider *pp; 2078 struct g_part_table *table; 2079 2080 pp = cp->provider; 2081 KASSERT(pp != NULL, ("%s", __func__)); 2082 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2083 g_topology_assert(); 2084 2085 KASSERT(pp->error != 0, ("%s", __func__)); 2086 table = cp->geom->softc; 2087 if (table != NULL && table->gpt_opened) 2088 g_access(cp, -1, -1, -1); 2089 g_part_wither(cp->geom, pp->error); 2090 } 2091 2092 static void 2093 g_part_spoiled(struct g_consumer *cp) 2094 { 2095 2096 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2097 g_topology_assert(); 2098 2099 cp->flags |= G_CF_ORPHAN; 2100 g_part_wither(cp->geom, ENXIO); 2101 } 2102 2103 static void 2104 g_part_start(struct bio *bp) 2105 { 2106 struct bio *bp2; 2107 struct g_consumer *cp; 2108 struct g_geom *gp; 2109 struct g_part_entry *entry; 2110 struct g_part_table *table; 2111 struct g_kerneldump *gkd; 2112 struct g_provider *pp; 2113 char buf[64]; 2114 2115 pp = bp->bio_to; 2116 gp = pp->geom; 2117 table = gp->softc; 2118 cp = LIST_FIRST(&gp->consumer); 2119 2120 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2121 pp->name)); 2122 2123 entry = pp->private; 2124 if (entry == NULL) { 2125 g_io_deliver(bp, ENXIO); 2126 return; 2127 } 2128 2129 switch(bp->bio_cmd) { 2130 case BIO_DELETE: 2131 case BIO_READ: 2132 case BIO_WRITE: 2133 if (bp->bio_offset >= pp->mediasize) { 2134 g_io_deliver(bp, EIO); 2135 return; 2136 } 2137 bp2 = g_clone_bio(bp); 2138 if (bp2 == NULL) { 2139 g_io_deliver(bp, ENOMEM); 2140 return; 2141 } 2142 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2143 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2144 bp2->bio_done = g_std_done; 2145 bp2->bio_offset += entry->gpe_offset; 2146 g_io_request(bp2, cp); 2147 return; 2148 case BIO_FLUSH: 2149 break; 2150 case BIO_GETATTR: 2151 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2152 return; 2153 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2154 return; 2155 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2156 return; 2157 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2158 return; 2159 if (g_handleattr_str(bp, "PART::scheme", 2160 table->gpt_scheme->name)) 2161 return; 2162 if (g_handleattr_str(bp, "PART::type", 2163 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2164 return; 2165 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2166 /* 2167 * Check that the partition is suitable for kernel 2168 * dumps. Typically only swap partitions should be 2169 * used. If the request comes from the nested scheme 2170 * we allow dumping there as well. 2171 */ 2172 if ((bp->bio_from == NULL || 2173 bp->bio_from->geom->class != &g_part_class) && 2174 G_PART_DUMPTO(table, entry) == 0) { 2175 g_io_deliver(bp, ENODEV); 2176 printf("GEOM_PART: Partition '%s' not suitable" 2177 " for kernel dumps (wrong type?)\n", 2178 pp->name); 2179 return; 2180 } 2181 gkd = (struct g_kerneldump *)bp->bio_data; 2182 if (gkd->offset >= pp->mediasize) { 2183 g_io_deliver(bp, EIO); 2184 return; 2185 } 2186 if (gkd->offset + gkd->length > pp->mediasize) 2187 gkd->length = pp->mediasize - gkd->offset; 2188 gkd->offset += entry->gpe_offset; 2189 } 2190 break; 2191 default: 2192 g_io_deliver(bp, EOPNOTSUPP); 2193 return; 2194 } 2195 2196 bp2 = g_clone_bio(bp); 2197 if (bp2 == NULL) { 2198 g_io_deliver(bp, ENOMEM); 2199 return; 2200 } 2201 bp2->bio_done = g_std_done; 2202 g_io_request(bp2, cp); 2203 } 2204 2205 static void 2206 g_part_init(struct g_class *mp) 2207 { 2208 2209 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2210 } 2211 2212 static void 2213 g_part_fini(struct g_class *mp) 2214 { 2215 2216 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2217 } 2218 2219 static void 2220 g_part_unload_event(void *arg, int flag) 2221 { 2222 struct g_consumer *cp; 2223 struct g_geom *gp; 2224 struct g_provider *pp; 2225 struct g_part_scheme *scheme; 2226 struct g_part_table *table; 2227 uintptr_t *xchg; 2228 int acc, error; 2229 2230 if (flag == EV_CANCEL) 2231 return; 2232 2233 xchg = arg; 2234 error = 0; 2235 scheme = (void *)(*xchg); 2236 2237 g_topology_assert(); 2238 2239 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2240 table = gp->softc; 2241 if (table->gpt_scheme != scheme) 2242 continue; 2243 2244 acc = 0; 2245 LIST_FOREACH(pp, &gp->provider, provider) 2246 acc += pp->acr + pp->acw + pp->ace; 2247 LIST_FOREACH(cp, &gp->consumer, consumer) 2248 acc += cp->acr + cp->acw + cp->ace; 2249 2250 if (!acc) 2251 g_part_wither(gp, ENOSYS); 2252 else 2253 error = EBUSY; 2254 } 2255 2256 if (!error) 2257 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2258 2259 *xchg = error; 2260 } 2261 2262 int 2263 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2264 { 2265 struct g_part_scheme *iter; 2266 uintptr_t arg; 2267 int error; 2268 2269 error = 0; 2270 switch (type) { 2271 case MOD_LOAD: 2272 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2273 if (scheme == iter) { 2274 printf("GEOM_PART: scheme %s is already " 2275 "registered!\n", scheme->name); 2276 break; 2277 } 2278 } 2279 if (iter == NULL) { 2280 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2281 scheme_list); 2282 g_retaste(&g_part_class); 2283 } 2284 break; 2285 case MOD_UNLOAD: 2286 arg = (uintptr_t)scheme; 2287 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2288 NULL); 2289 if (error == 0) 2290 error = arg; 2291 break; 2292 default: 2293 error = EOPNOTSUPP; 2294 break; 2295 } 2296 2297 return (error); 2298 } 2299