/*-
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
    { 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
    "(none)",
    g_part_null_methods,
    sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

struct g_part_alias_list {
    const char *lexeme;
    enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
    { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
    { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
    { "apple-label", G_PART_ALIAS_APPLE_LABEL },
    { "apple-raid", G_PART_ALIAS_APPLE_RAID },
    { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
    { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
    { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
    { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
    { "ebr", G_PART_ALIAS_EBR },
    { "efi", G_PART_ALIAS_EFI },
    { "fat16", G_PART_ALIAS_MS_FAT16 },
    { "fat32", G_PART_ALIAS_MS_FAT32 },
    { "freebsd", G_PART_ALIAS_FREEBSD },
    { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
    { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
    { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
    { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
    { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
    { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
    { "linux-data", G_PART_ALIAS_LINUX_DATA },
    { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
    { "linux-raid", G_PART_ALIAS_LINUX_RAID },
    { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
    { "mbr", G_PART_ALIAS_MBR },
    { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
    { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
    { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
    { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
    { "ntfs", G_PART_ALIAS_MS_NTFS },
    { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
    { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
    { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
    { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
    { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
    { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
    { "vmware-vmfs", G_PART_ALIAS_VMFS },
    { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
    { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
};
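
/*
 * The aliases above are scheme-neutral type names.  Each scheme
 * translates an alias into its native on-disk representation, e.g.
 * a type UUID under GPT or a one-byte type identifier under MBR.
 * From userland, a command such as "gpart add -t freebsd-ufs da0"
 * passes the alias down as the "type" parameter of the add request.
 */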

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity);
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1,
    "Enable integrity checking");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
    .name = "PART",
    .version = G_VERSION,
    /* Class methods. */
    .ctlreq = g_part_ctlreq,
    .destroy_geom = g_part_destroy_geom,
    .fini = g_part_fini,
    .init = g_part_init,
    .taste = g_part_taste,
    /* Geom methods. */
    .access = g_part_access,
    .dumpconf = g_part_dumpconf,
    .orphan = g_part_orphan,
    .spoiled = g_part_spoiled,
    .start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
    int i;

    for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
        if (g_part_alias_list[i].alias != alias)
            continue;
        return (g_part_alias_list[i].lexeme);
    }

    return (NULL);
}

void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
    static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
    off_t chs, cylinders;
    u_int heads;
    int idx;

    *bestchs = 0;
    *bestheads = 0;
    for (idx = 0; candidate_heads[idx] != 0; idx++) {
        heads = candidate_heads[idx];
        cylinders = blocks / heads / sectors;
        if (cylinders < heads || cylinders < sectors)
            break;
        if (cylinders > 1023)
            continue;
        chs = cylinders * heads * sectors;
        if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
            *bestchs = chs;
            *bestheads = heads;
        }
    }
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
    static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
    off_t chs, bestchs;
    u_int heads, sectors;
    int idx;

    if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
        g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
        table->gpt_fixgeom = 0;
        table->gpt_heads = 0;
        table->gpt_sectors = 0;
        bestchs = 0;
        for (idx = 0; candidate_sectors[idx] != 0; idx++) {
            sectors = candidate_sectors[idx];
            g_part_geometry_heads(blocks, sectors, &chs, &heads);
            if (chs == 0)
                continue;
            /*
             * Prefer a geometry with sectors > 1, but only if
             * it doesn't bump down the number of heads to 1.
             */
            if (chs > bestchs || (chs == bestchs && heads > 1 &&
                table->gpt_sectors == 1)) {
                bestchs = chs;
                table->gpt_heads = heads;
                table->gpt_sectors = sectors;
            }
        }
        /*
         * If we didn't find a geometry at all, then the disk is
         * too big. This means we can use the maximum number of
         * heads and sectors.
         */
        if (bestchs == 0) {
            table->gpt_heads = 255;
            table->gpt_sectors = 63;
        }
    } else {
        table->gpt_fixgeom = 1;
        table->gpt_heads = heads;
        table->gpt_sectors = sectors;
    }
}
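
/*
 * Worked example (illustrative numbers, not from the original source):
 * for a disk of 8388608 blocks (4 GB at 512-byte sectors) with no
 * usable firmware geometry, the search above settles on sectors = 63
 * and heads = 255: cylinders = 8388608 / 255 / 63 = 522, which is
 * within the 1023-cylinder limit, and chs = 522 * 255 * 63 = 8385930
 * is the largest addressable capacity among the candidates.
 */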

#define DPRINTF(...) if (bootverbose) {        \
    printf("GEOM_PART: " __VA_ARGS__);         \
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
    struct g_part_entry *e1, *e2;
    struct g_provider *pp;
    off_t offset;
    int failed;

    failed = 0;
    pp = cp->provider;
    if (table->gpt_last < table->gpt_first) {
        DPRINTF("last LBA is below first LBA: %jd < %jd\n",
            (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
        failed++;
    }
    if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
        DPRINTF("last LBA extends beyond mediasize: "
            "%jd > %jd\n", (intmax_t)table->gpt_last,
            (intmax_t)pp->mediasize / pp->sectorsize - 1);
        failed++;
    }
    LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
        if (e1->gpe_deleted || e1->gpe_internal)
            continue;
        if (e1->gpe_start < table->gpt_first) {
            DPRINTF("partition %d has start offset below first "
                "LBA: %jd < %jd\n", e1->gpe_index,
                (intmax_t)e1->gpe_start,
                (intmax_t)table->gpt_first);
            failed++;
        }
        if (e1->gpe_start > table->gpt_last) {
            DPRINTF("partition %d has start offset beyond last "
                "LBA: %jd > %jd\n", e1->gpe_index,
                (intmax_t)e1->gpe_start,
                (intmax_t)table->gpt_last);
            failed++;
        }
        if (e1->gpe_end < e1->gpe_start) {
            DPRINTF("partition %d has end offset below start "
                "offset: %jd < %jd\n", e1->gpe_index,
                (intmax_t)e1->gpe_end,
                (intmax_t)e1->gpe_start);
            failed++;
        }
        if (e1->gpe_end > table->gpt_last) {
            DPRINTF("partition %d has end offset beyond last "
                "LBA: %jd > %jd\n", e1->gpe_index,
                (intmax_t)e1->gpe_end,
                (intmax_t)table->gpt_last);
            failed++;
        }
        if (pp->stripesize > 0) {
            offset = e1->gpe_start * pp->sectorsize;
            if (e1->gpe_offset > offset)
                offset = e1->gpe_offset;
            if ((offset + pp->stripeoffset) % pp->stripesize) {
                DPRINTF("partition %d is not aligned on %u "
                    "bytes\n", e1->gpe_index, pp->stripesize);
                /* Don't treat this as a critical failure */
            }
        }
        e2 = e1;
        while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
            if (e2->gpe_deleted || e2->gpe_internal)
                continue;
            if (e1->gpe_start >= e2->gpe_start &&
                e1->gpe_start <= e2->gpe_end) {
                DPRINTF("partition %d has start offset inside "
                    "partition %d: start[%d] %jd >= start[%d] "
                    "%jd <= end[%d] %jd\n",
                    e1->gpe_index, e2->gpe_index,
                    e2->gpe_index, (intmax_t)e2->gpe_start,
                    e1->gpe_index, (intmax_t)e1->gpe_start,
                    e2->gpe_index, (intmax_t)e2->gpe_end);
                failed++;
            }
            if (e1->gpe_end >= e2->gpe_start &&
                e1->gpe_end <= e2->gpe_end) {
                DPRINTF("partition %d has end offset inside "
                    "partition %d: start[%d] %jd >= end[%d] "
                    "%jd <= end[%d] %jd\n",
                    e1->gpe_index, e2->gpe_index,
                    e2->gpe_index, (intmax_t)e2->gpe_start,
                    e1->gpe_index, (intmax_t)e1->gpe_end,
                    e2->gpe_index, (intmax_t)e2->gpe_end);
                failed++;
            }
            if (e1->gpe_start < e2->gpe_start &&
                e1->gpe_end > e2->gpe_end) {
                DPRINTF("partition %d contains partition %d: "
                    "start[%d] %jd > start[%d] %jd, end[%d] "
                    "%jd < end[%d] %jd\n",
                    e1->gpe_index, e2->gpe_index,
                    e1->gpe_index, (intmax_t)e1->gpe_start,
                    e2->gpe_index, (intmax_t)e2->gpe_start,
                    e2->gpe_index, (intmax_t)e2->gpe_end,
                    e1->gpe_index, (intmax_t)e1->gpe_end);
                failed++;
            }
        }
    }
    if (failed != 0) {
        printf("GEOM_PART: integrity check failed (%s, %s)\n",
            pp->name, table->gpt_scheme->name);
        if (check_integrity != 0)
            return (EINVAL);
        table->gpt_corrupt = 1;
    }
    return (0);
}
#undef DPRINTF
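
/*
 * With the kern.geom.part.check_integrity tunable set to 0, a table
 * that fails the checks above is still accepted but flagged corrupt;
 * g_part_ctlreq() then refuses every verb except "destroy" and
 * "recover".  E.g. one way to examine and repair a damaged table:
 *
 *     sysctl kern.geom.part.check_integrity=0
 *     gpart recover da0
 */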

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
    struct g_part_entry *entry, *last;

    last = NULL;
    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (entry->gpe_index == index)
            break;
        if (entry->gpe_index > index) {
            entry = NULL;
            break;
        }
        last = entry;
    }
    if (entry == NULL) {
        entry = g_malloc(table->gpt_scheme->gps_entrysz,
            M_WAITOK | M_ZERO);
        entry->gpe_index = index;
        if (last == NULL)
            LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
        else
            LIST_INSERT_AFTER(last, entry, gpe_entry);
    } else
        entry->gpe_offset = 0;
    entry->gpe_start = start;
    entry->gpe_end = end;
    return (entry);
}

static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
    struct g_consumer *cp;
    struct g_provider *pp;
    struct sbuf *sb;
    off_t offset;

    cp = LIST_FIRST(&gp->consumer);
    pp = cp->provider;

    offset = entry->gpe_start * pp->sectorsize;
    if (entry->gpe_offset < offset)
        entry->gpe_offset = offset;

    if (entry->gpe_pp == NULL) {
        sb = sbuf_new_auto();
        G_PART_FULLNAME(table, entry, sb, gp->name);
        sbuf_finish(sb);
        entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
        sbuf_delete(sb);
        entry->gpe_pp->private = entry;    /* Close the circle. */
    }
    entry->gpe_pp->index = entry->gpe_index - 1;    /* index is 1-based. */
    entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
        pp->sectorsize;
    entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
    entry->gpe_pp->sectorsize = pp->sectorsize;
    entry->gpe_pp->stripesize = pp->stripesize;
    entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
    if (pp->stripesize > 0)
        entry->gpe_pp->stripeoffset %= pp->stripesize;
    entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
    g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom *
g_part_find_geom(const char *name)
{
    struct g_geom *gp;

    LIST_FOREACH(gp, &g_part_class.geom, geom) {
        if (!strcmp(name, gp->name))
            break;
    }
    return (gp);
}

static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
    struct g_geom *gp;
    const char *gname;

    gname = gctl_get_asciiparam(req, name);
    if (gname == NULL)
        return (ENOATTR);
    if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
        gname += sizeof(_PATH_DEV) - 1;
    gp = g_part_find_geom(gname);
    if (gp == NULL) {
        gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
        return (EINVAL);
    }
    if ((gp->flags & G_GEOM_WITHER) != 0) {
        gctl_error(req, "%d %s", ENXIO, gname);
        return (ENXIO);
    }
    *v = gp;
    return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
    struct g_provider *pp;
    const char *pname;

    pname = gctl_get_asciiparam(req, name);
    if (pname == NULL)
        return (ENOATTR);
    if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
        pname += sizeof(_PATH_DEV) - 1;
    pp = g_provider_by_name(pname);
    if (pp == NULL) {
        gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
        return (EINVAL);
    }
    *v = pp;
    return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
    const char *p;
    char *x;
    quad_t q;

    p = gctl_get_asciiparam(req, name);
    if (p == NULL)
        return (ENOATTR);
    q = strtoq(p, &x, 0);
    if (*x != '\0' || q < 0) {
        gctl_error(req, "%d %s '%s'", EINVAL, name, p);
        return (EINVAL);
    }
    *v = q;
    return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
    struct g_part_scheme *s;
    const char *p;

    p = gctl_get_asciiparam(req, name);
    if (p == NULL)
        return (ENOATTR);
    TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
        if (s == &g_part_null_scheme)
            continue;
        if (!strcasecmp(s->name, p))
            break;
    }
    if (s == NULL) {
        gctl_error(req, "%d %s '%s'", EINVAL, name, p);
        return (EINVAL);
    }
    *v = s;
    return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
    const char *p;

    p = gctl_get_asciiparam(req, name);
    if (p == NULL)
        return (ENOATTR);
    /* An empty label is always valid. */
    if (strcmp(name, "label") != 0 && p[0] == '\0') {
        gctl_error(req, "%d %s '%s'", EINVAL, name, p);
        return (EINVAL);
    }
    *v = p;
    return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
    const intmax_t *p;
    int size;

    p = gctl_get_param(req, name, &size);
    if (p == NULL)
        return (ENOATTR);
    if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
        gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
        return (EINVAL);
    }
    *v = (u_int)*p;
    return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
    const uint32_t *p;
    int size;

    p = gctl_get_param(req, name, &size);
    if (p == NULL)
        return (ENOATTR);
    if (size != sizeof(*p) || *p > INT_MAX) {
        gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
        return (EINVAL);
    }
    *v = (u_int)*p;
    return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
    const void *p;
    int size;

    p = gctl_get_param(req, name, &size);
    if (p == NULL)
        return (ENOATTR);
    *v = p;
    *s = size;
    return (0);
}

static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
    struct g_part_scheme *iter, *scheme;
    struct g_part_table *table;
    int pri, probe;

    table = gp->softc;
    scheme = (table != NULL) ? table->gpt_scheme : NULL;
    pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
    if (pri == 0)
        goto done;
    if (pri > 0) {    /* error */
        scheme = NULL;
        pri = INT_MIN;
    }

    TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
        if (iter == &g_part_null_scheme)
            continue;
        table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
            M_WAITOK);
        table->gpt_gp = gp;
        table->gpt_scheme = iter;
        table->gpt_depth = depth;
        probe = G_PART_PROBE(table, cp);
        if (probe <= 0 && probe > pri) {
            pri = probe;
            scheme = iter;
            if (gp->softc != NULL)
                kobj_delete((kobj_t)gp->softc, M_GEOM);
            gp->softc = table;
            if (pri == 0)
                goto done;
        } else
            kobj_delete((kobj_t)table, M_GEOM);
    }

done:
    return ((scheme == NULL) ? ENXIO : 0);
}
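
/*
 * Probe convention, as used above: a scheme's probe method returns
 * zero or a negative priority when it recognizes its metadata on the
 * media (0 is a perfect match; more negative values are weaker claims)
 * and a positive value on error.  g_part_probe() keeps the candidate
 * whose priority is closest to zero and stops early on an exact match.
 */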

/*
 * Control request functions.
 */

static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_part_entry *delent, *last, *entry;
    struct g_part_table *table;
    struct sbuf *sb;
    quad_t end;
    unsigned int index;
    int error;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    pp = LIST_FIRST(&gp->consumer)->provider;
    table = gp->softc;
    end = gpp->gpp_start + gpp->gpp_size - 1;

    if (gpp->gpp_start < table->gpt_first ||
        gpp->gpp_start > table->gpt_last) {
        gctl_error(req, "%d start '%jd'", EINVAL,
            (intmax_t)gpp->gpp_start);
        return (EINVAL);
    }
    if (end < gpp->gpp_start || end > table->gpt_last) {
        gctl_error(req, "%d size '%jd'", EINVAL,
            (intmax_t)gpp->gpp_size);
        return (EINVAL);
    }
    if (gpp->gpp_index > table->gpt_entries) {
        gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
        return (EINVAL);
    }

    delent = last = NULL;
    index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (entry->gpe_deleted) {
            if (entry->gpe_index == index)
                delent = entry;
            continue;
        }
        if (entry->gpe_index == index)
            index = entry->gpe_index + 1;
        if (entry->gpe_index < index)
            last = entry;
        if (entry->gpe_internal)
            continue;
        if (gpp->gpp_start >= entry->gpe_start &&
            gpp->gpp_start <= entry->gpe_end) {
            gctl_error(req, "%d start '%jd'", ENOSPC,
                (intmax_t)gpp->gpp_start);
            return (ENOSPC);
        }
        if (end >= entry->gpe_start && end <= entry->gpe_end) {
            gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
            return (ENOSPC);
        }
        if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
            gctl_error(req, "%d size '%jd'", ENOSPC,
                (intmax_t)gpp->gpp_size);
            return (ENOSPC);
        }
    }
    if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
        gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
        return (EEXIST);
    }
    if (index > table->gpt_entries) {
        gctl_error(req, "%d index '%d'", ENOSPC, index);
        return (ENOSPC);
    }

    entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
        M_WAITOK | M_ZERO) : delent;
    entry->gpe_index = index;
    entry->gpe_start = gpp->gpp_start;
    entry->gpe_end = end;
    error = G_PART_ADD(table, entry, gpp);
    if (error) {
        gctl_error(req, "%d", error);
        if (delent == NULL)
            g_free(entry);
        return (error);
    }
    if (delent == NULL) {
        if (last == NULL)
            LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
        else
            LIST_INSERT_AFTER(last, entry, gpe_entry);
        entry->gpe_created = 1;
    } else {
        entry->gpe_deleted = 0;
        entry->gpe_modified = 1;
    }
    g_part_new_provider(gp, table, entry);

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        G_PART_FULLNAME(table, entry, sb, gp->name);
        if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
            sbuf_printf(sb, " added, but partition is not "
                "aligned on %u bytes\n", pp->stripesize);
        else
            sbuf_cat(sb, " added\n");
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);
}
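
/*
 * E.g. "gpart add -t freebsd-ufs -b 2048 -s 20971520 -i 2 da0" arrives
 * here with gpp_start, gpp_size, gpp_type and gpp_index filled in;
 * when -i is omitted, the loop above assigns the lowest free index.
 */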

static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_geom *gp;
    struct g_part_table *table;
    struct sbuf *sb;
    int error, sz;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    table = gp->softc;
    sz = table->gpt_scheme->gps_bootcodesz;
    if (sz == 0) {
        error = ENODEV;
        goto fail;
    }
    if (gpp->gpp_codesize > sz) {
        error = EFBIG;
        goto fail;
    }

    error = G_PART_BOOTCODE(table, gpp);
    if (error)
        goto fail;

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        sbuf_printf(sb, "bootcode written to %s\n", gp->name);
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);

fail:
    gctl_error(req, "%d", error);
    return (error);
}

static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_consumer *cp;
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_part_entry *entry, *tmp;
    struct g_part_table *table;
    char *buf;
    int error, i;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    table = gp->softc;
    if (!table->gpt_opened) {
        gctl_error(req, "%d", EPERM);
        return (EPERM);
    }

    g_topology_unlock();

    cp = LIST_FIRST(&gp->consumer);
    if ((table->gpt_smhead | table->gpt_smtail) != 0) {
        pp = cp->provider;
        buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
        while (table->gpt_smhead != 0) {
            i = ffs(table->gpt_smhead) - 1;
            error = g_write_data(cp, i * pp->sectorsize, buf,
                pp->sectorsize);
            if (error) {
                g_free(buf);
                goto fail;
            }
            table->gpt_smhead &= ~(1 << i);
        }
        while (table->gpt_smtail != 0) {
            i = ffs(table->gpt_smtail) - 1;
            error = g_write_data(cp, pp->mediasize - (i + 1) *
                pp->sectorsize, buf, pp->sectorsize);
            if (error) {
                g_free(buf);
                goto fail;
            }
            table->gpt_smtail &= ~(1 << i);
        }
        g_free(buf);
    }

    if (table->gpt_scheme == &g_part_null_scheme) {
        g_topology_lock();
        g_access(cp, -1, -1, -1);
        g_part_wither(gp, ENXIO);
        return (0);
    }

    error = G_PART_WRITE(table, cp);
    if (error)
        goto fail;

    LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
        if (!entry->gpe_deleted) {
            entry->gpe_created = 0;
            entry->gpe_modified = 0;
            continue;
        }
        LIST_REMOVE(entry, gpe_entry);
        g_free(entry);
    }
    table->gpt_created = 0;
    table->gpt_opened = 0;

    g_topology_lock();
    g_access(cp, -1, -1, -1);
    return (0);

fail:
    g_topology_lock();
    gctl_error(req, "%d", error);
    return (error);
}
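
/*
 * Pending changes only reach the disk in the commit handler above.
 * The gpt_smhead and gpt_smtail bitmaps name sectors at the head and
 * tail of the media that must be zeroed (e.g. stale metadata of a
 * destroyed scheme) before the scheme's write method runs.  Note that
 * gpart(8) passes the 'C' flag by default, which triggers the
 * automatic commit in g_part_ctlreq(); "gpart ... -f x" defers the
 * changes until an explicit "gpart commit".
 */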

static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_consumer *cp;
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_part_scheme *scheme;
    struct g_part_table *null, *table;
    struct sbuf *sb;
    int attr, error;

    pp = gpp->gpp_provider;
    scheme = gpp->gpp_scheme;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
    g_topology_assert();

    /* Check that there isn't already a g_part geom on the provider. */
    gp = g_part_find_geom(pp->name);
    if (gp != NULL) {
        null = gp->softc;
        if (null->gpt_scheme != &g_part_null_scheme) {
            gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
            return (EEXIST);
        }
    } else
        null = NULL;

    if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
        (gpp->gpp_entries < scheme->gps_minent ||
         gpp->gpp_entries > scheme->gps_maxent)) {
        gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
        return (EINVAL);
    }

    if (null == NULL)
        gp = g_new_geomf(&g_part_class, "%s", pp->name);
    gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
        M_WAITOK);
    table = gp->softc;
    table->gpt_gp = gp;
    table->gpt_scheme = gpp->gpp_scheme;
    table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
        gpp->gpp_entries : scheme->gps_minent;
    LIST_INIT(&table->gpt_entry);
    if (null == NULL) {
        cp = g_new_consumer(gp);
        error = g_attach(cp, pp);
        if (error == 0)
            error = g_access(cp, 1, 1, 1);
        if (error != 0) {
            g_part_wither(gp, error);
            gctl_error(req, "%d geom '%s'", error, pp->name);
            return (error);
        }
        table->gpt_opened = 1;
    } else {
        cp = LIST_FIRST(&gp->consumer);
        table->gpt_opened = null->gpt_opened;
        table->gpt_smhead = null->gpt_smhead;
        table->gpt_smtail = null->gpt_smtail;
    }

    g_topology_unlock();

    /* Make sure the provider has media. */
    if (pp->mediasize == 0 || pp->sectorsize == 0) {
        error = ENODEV;
        goto fail;
    }

    /* Make sure we can nest and if so, determine our depth. */
    error = g_getattr("PART::isleaf", cp, &attr);
    if (!error && attr) {
        error = ENODEV;
        goto fail;
    }
    error = g_getattr("PART::depth", cp, &attr);
    table->gpt_depth = (!error) ? attr + 1 : 0;

    /*
     * Synthesize a disk geometry. Some partitioning schemes
     * depend on it and since some file systems need it even
     * when the partition scheme doesn't, we do it here in
     * scheme-independent code.
     */
    g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

    error = G_PART_CREATE(table, gpp);
    if (error)
        goto fail;

    g_topology_lock();

    table->gpt_created = 1;
    if (null != NULL)
        kobj_delete((kobj_t)null, M_GEOM);

    /*
     * Support automatic commit by filling in the gpp_geom
     * parameter.
     */
    gpp->gpp_parms |= G_PART_PARM_GEOM;
    gpp->gpp_geom = gp;

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        sbuf_printf(sb, "%s created\n", gp->name);
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);

fail:
    g_topology_lock();
    if (null == NULL) {
        g_access(cp, -1, -1, -1);
        g_part_wither(gp, error);
    } else {
        kobj_delete((kobj_t)gp->softc, M_GEOM);
        gp->softc = null;
    }
    gctl_error(req, "%d provider", error);
    return (error);
}

static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_part_entry *entry;
    struct g_part_table *table;
    struct sbuf *sb;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    table = gp->softc;

    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (entry->gpe_deleted || entry->gpe_internal)
            continue;
        if (entry->gpe_index == gpp->gpp_index)
            break;
    }
    if (entry == NULL) {
        gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
        return (ENOENT);
    }

    pp = entry->gpe_pp;
    if (pp != NULL) {
        if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
            gctl_error(req, "%d", EBUSY);
            return (EBUSY);
        }

        pp->private = NULL;
        entry->gpe_pp = NULL;
    }

    if (pp != NULL)
        g_wither_provider(pp, ENXIO);

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        G_PART_FULLNAME(table, entry, sb, gp->name);
        sbuf_cat(sb, " deleted\n");
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }

    if (entry->gpe_created) {
        LIST_REMOVE(entry, gpe_entry);
        g_free(entry);
    } else {
        entry->gpe_modified = 0;
        entry->gpe_deleted = 1;
    }
    return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_consumer *cp;
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_part_entry *entry, *tmp;
    struct g_part_table *null, *table;
    struct sbuf *sb;
    int error;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    table = gp->softc;
    /* Check for busy providers. */
    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (entry->gpe_deleted || entry->gpe_internal)
            continue;
        if (gpp->gpp_force) {
            pp = entry->gpe_pp;
            if (pp == NULL)
                continue;
            if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
                continue;
        }
        gctl_error(req, "%d", EBUSY);
        return (EBUSY);
    }

    if (gpp->gpp_force) {
        /* Destroy all providers. */
        LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
            pp = entry->gpe_pp;
            if (pp != NULL) {
                pp->private = NULL;
                g_wither_provider(pp, ENXIO);
            }
            LIST_REMOVE(entry, gpe_entry);
            g_free(entry);
        }
    }

    error = G_PART_DESTROY(table, gpp);
    if (error) {
        gctl_error(req, "%d", error);
        return (error);
    }

    gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
        M_WAITOK);
    null = gp->softc;
    null->gpt_gp = gp;
    null->gpt_scheme = &g_part_null_scheme;
    LIST_INIT(&null->gpt_entry);

    cp = LIST_FIRST(&gp->consumer);
    pp = cp->provider;
    null->gpt_last = pp->mediasize / pp->sectorsize - 1;

    null->gpt_depth = table->gpt_depth;
    null->gpt_opened = table->gpt_opened;
    null->gpt_smhead = table->gpt_smhead;
    null->gpt_smtail = table->gpt_smtail;

    while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
        LIST_REMOVE(entry, gpe_entry);
        g_free(entry);
    }
    kobj_delete((kobj_t)table, M_GEOM);

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        sbuf_printf(sb, "%s destroyed\n", gp->name);
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_geom *gp;
    struct g_part_entry *entry;
    struct g_part_table *table;
    struct sbuf *sb;
    int error;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    table = gp->softc;

    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (entry->gpe_deleted || entry->gpe_internal)
            continue;
        if (entry->gpe_index == gpp->gpp_index)
            break;
    }
    if (entry == NULL) {
        gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
        return (ENOENT);
    }

    error = G_PART_MODIFY(table, entry, gpp);
    if (error) {
        gctl_error(req, "%d", error);
        return (error);
    }

    if (!entry->gpe_created)
        entry->gpe_modified = 1;

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        G_PART_FULLNAME(table, entry, sb, gp->name);
        sbuf_cat(sb, " modified\n");
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{

    gctl_error(req, "%d verb 'move'", ENOSYS);
    return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_part_table *table;
    struct g_geom *gp;
    struct sbuf *sb;
    int error, recovered;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();
    table = gp->softc;
    error = recovered = 0;

    if (table->gpt_corrupt) {
        error = G_PART_RECOVER(table);
        if (error == 0)
            error = g_part_check_integrity(table,
                LIST_FIRST(&gp->consumer));
        if (error) {
            gctl_error(req, "%d recovering '%s' failed",
                error, gp->name);
            return (error);
        }
        recovered = 1;
    }
    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        if (recovered)
            sbuf_printf(sb, "%s recovered\n", gp->name);
        else
            sbuf_printf(sb, "%s recovering is not needed\n",
                gp->name);
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);
}

static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_part_entry *pe, *entry;
    struct g_part_table *table;
    struct sbuf *sb;
    quad_t end;
    int error;
    off_t mediasize;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();
    table = gp->softc;

    /* check gpp_index */
    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (entry->gpe_deleted || entry->gpe_internal)
            continue;
        if (entry->gpe_index == gpp->gpp_index)
            break;
    }
    if (entry == NULL) {
        gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
        return (ENOENT);
    }

    /* check gpp_size */
    end = entry->gpe_start + gpp->gpp_size - 1;
    if (gpp->gpp_size < 1 || end > table->gpt_last) {
        gctl_error(req, "%d size '%jd'", EINVAL,
            (intmax_t)gpp->gpp_size);
        return (EINVAL);
    }

    LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
        if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
            continue;
        if (end >= pe->gpe_start && end <= pe->gpe_end) {
            gctl_error(req, "%d end '%jd'", ENOSPC,
                (intmax_t)end);
            return (ENOSPC);
        }
        if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
            gctl_error(req, "%d size '%jd'", ENOSPC,
                (intmax_t)gpp->gpp_size);
            return (ENOSPC);
        }
    }

    pp = entry->gpe_pp;
    if ((g_debugflags & 16) == 0 &&
        (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
        if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
            /* Deny shrinking of an opened partition. */
            gctl_error(req, "%d", EBUSY);
            return (EBUSY);
        }
    }

    error = G_PART_RESIZE(table, entry, gpp);
    if (error) {
        gctl_error(req, "%d", error);
        return (error);
    }

    if (!entry->gpe_created)
        entry->gpe_modified = 1;

    /* update mediasize of changed provider */
    mediasize = (entry->gpe_end - entry->gpe_start + 1) *
        pp->sectorsize;
    g_resize_provider(pp, mediasize);

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        G_PART_FULLNAME(table, entry, sb, gp->name);
        sbuf_cat(sb, " resized\n");
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);
}
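
/*
 * Shrinking a partition that is in use is refused above unless the
 * 0x10 ("allow foot shooting") bit is set in kern.geom.debugflags;
 * growing an open partition is permitted, and g_resize_provider()
 * propagates the new size to its consumers.
 */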

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
    struct g_geom *gp;
    struct g_part_entry *entry;
    struct g_part_table *table;
    struct sbuf *sb;
    int error;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    table = gp->softc;

    if (gpp->gpp_parms & G_PART_PARM_INDEX) {
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
            if (entry->gpe_deleted || entry->gpe_internal)
                continue;
            if (entry->gpe_index == gpp->gpp_index)
                break;
        }
        if (entry == NULL) {
            gctl_error(req, "%d index '%d'", ENOENT,
                gpp->gpp_index);
            return (ENOENT);
        }
    } else
        entry = NULL;

    error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
    if (error) {
        gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
        return (error);
    }

    /* Provide feedback if so requested. */
    if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
        sb = sbuf_new_auto();
        sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
            (set) ? "" : "un");
        if (entry)
            G_PART_FULLNAME(table, entry, sb, gp->name);
        else
            sbuf_cat(sb, gp->name);
        sbuf_cat(sb, "\n");
        sbuf_finish(sb);
        gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
    }
    return (0);
}

static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
    struct g_consumer *cp;
    struct g_provider *pp;
    struct g_geom *gp;
    struct g_part_entry *entry, *tmp;
    struct g_part_table *table;
    int error, reprobe;

    gp = gpp->gpp_geom;
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
    g_topology_assert();

    table = gp->softc;
    if (!table->gpt_opened) {
        gctl_error(req, "%d", EPERM);
        return (EPERM);
    }

    cp = LIST_FIRST(&gp->consumer);
    LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
        entry->gpe_modified = 0;
        if (entry->gpe_created) {
            pp = entry->gpe_pp;
            if (pp != NULL) {
                pp->private = NULL;
                entry->gpe_pp = NULL;
                g_wither_provider(pp, ENXIO);
            }
            entry->gpe_deleted = 1;
        }
        if (entry->gpe_deleted) {
            LIST_REMOVE(entry, gpe_entry);
            g_free(entry);
        }
    }

    g_topology_unlock();

    reprobe = (table->gpt_scheme == &g_part_null_scheme ||
        table->gpt_created) ? 1 : 0;

    if (reprobe) {
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
            if (entry->gpe_internal)
                continue;
            error = EBUSY;
            goto fail;
        }
        while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
            LIST_REMOVE(entry, gpe_entry);
            g_free(entry);
        }
        error = g_part_probe(gp, cp, table->gpt_depth);
        if (error) {
            g_topology_lock();
            g_access(cp, -1, -1, -1);
            g_part_wither(gp, error);
            return (0);
        }
        table = gp->softc;

        /*
         * Synthesize a disk geometry. Some partitioning schemes
         * depend on it and since some file systems need it even
         * when the partition scheme doesn't, we do it here in
         * scheme-independent code.
         */
        pp = cp->provider;
        g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
    }

    error = G_PART_READ(table, cp);
    if (error)
        goto fail;
    error = g_part_check_integrity(table, cp);
    if (error)
        goto fail;

    g_topology_lock();
    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (!entry->gpe_internal)
            g_part_new_provider(gp, table, entry);
    }

    table->gpt_opened = 0;
    g_access(cp, -1, -1, -1);
    return (0);

fail:
    g_topology_lock();
    gctl_error(req, "%d", error);
    return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
    struct g_part_entry *entry;
    struct g_part_table *table;

    table = gp->softc;
    if (table != NULL) {
        G_PART_DESTROY(table, NULL);
        while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
            LIST_REMOVE(entry, gpe_entry);
            g_free(entry);
        }
        if (gp->softc != NULL) {
            kobj_delete((kobj_t)gp->softc, M_GEOM);
            gp->softc = NULL;
        }
    }
    g_wither_geom(gp, error);
}

/*
 * Class methods.
 */

static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
    struct g_part_parms gpp;
    struct g_part_table *table;
    struct gctl_req_arg *ap;
    enum g_part_ctl ctlreq;
    unsigned int i, mparms, oparms, parm;
    int auto_commit, close_on_error;
    int error, modifies;

    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
    g_topology_assert();

    ctlreq = G_PART_CTL_NONE;
    modifies = 1;
    mparms = 0;
    oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
    switch (*verb) {
    case 'a':
        if (!strcmp(verb, "add")) {
            ctlreq = G_PART_CTL_ADD;
            mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
                G_PART_PARM_START | G_PART_PARM_TYPE;
            oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
        }
        break;
    case 'b':
        if (!strcmp(verb, "bootcode")) {
            ctlreq = G_PART_CTL_BOOTCODE;
            mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
        }
        break;
    case 'c':
        if (!strcmp(verb, "commit")) {
            ctlreq = G_PART_CTL_COMMIT;
            mparms |= G_PART_PARM_GEOM;
            modifies = 0;
        } else if (!strcmp(verb, "create")) {
            ctlreq = G_PART_CTL_CREATE;
            mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
            oparms |= G_PART_PARM_ENTRIES;
        }
        break;
    case 'd':
        if (!strcmp(verb, "delete")) {
            ctlreq = G_PART_CTL_DELETE;
            mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
        } else if (!strcmp(verb, "destroy")) {
            ctlreq = G_PART_CTL_DESTROY;
            mparms |= G_PART_PARM_GEOM;
            oparms |= G_PART_PARM_FORCE;
        }
        break;
    case 'm':
        if (!strcmp(verb, "modify")) {
            ctlreq = G_PART_CTL_MODIFY;
            mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
            oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
        } else if (!strcmp(verb, "move")) {
            ctlreq = G_PART_CTL_MOVE;
            mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
        }
        break;
    case 'r':
        if (!strcmp(verb, "recover")) {
            ctlreq = G_PART_CTL_RECOVER;
            mparms |= G_PART_PARM_GEOM;
        } else if (!strcmp(verb, "resize")) {
            ctlreq = G_PART_CTL_RESIZE;
            mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
                G_PART_PARM_SIZE;
        }
        break;
    case 's':
        if (!strcmp(verb, "set")) {
            ctlreq = G_PART_CTL_SET;
            mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
            oparms |= G_PART_PARM_INDEX;
        }
        break;
    case 'u':
        if (!strcmp(verb, "undo")) {
            ctlreq = G_PART_CTL_UNDO;
            mparms |= G_PART_PARM_GEOM;
            modifies = 0;
        } else if (!strcmp(verb, "unset")) {
            ctlreq = G_PART_CTL_UNSET;
            mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
            oparms |= G_PART_PARM_INDEX;
        }
        break;
    }
    if (ctlreq == G_PART_CTL_NONE) {
        gctl_error(req, "%d verb '%s'", EINVAL, verb);
        return;
    }
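
    /*
     * The verbs dispatched above are the requests gpart(8) sends
     * down, e.g.:
     *
     *     gpart create -s gpt da0          -> "create" (provider, scheme)
     *     gpart add -t freebsd-ufs ... da0 -> "add" (geom, type, start, size)
     *     gpart set -a active -i 1 da0     -> "set" (geom, attrib[, index])
     *
     * Mandatory parameters accumulate in mparms, optional ones in
     * oparms; any other parameter is rejected by the argument loop
     * below.
     */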

    bzero(&gpp, sizeof(gpp));
    for (i = 0; i < req->narg; i++) {
        ap = &req->arg[i];
        parm = 0;
        switch (ap->name[0]) {
        case 'a':
            if (!strcmp(ap->name, "arg0")) {
                parm = mparms &
                    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
            }
            if (!strcmp(ap->name, "attrib"))
                parm = G_PART_PARM_ATTRIB;
            break;
        case 'b':
            if (!strcmp(ap->name, "bootcode"))
                parm = G_PART_PARM_BOOTCODE;
            break;
        case 'c':
            if (!strcmp(ap->name, "class"))
                continue;
            break;
        case 'e':
            if (!strcmp(ap->name, "entries"))
                parm = G_PART_PARM_ENTRIES;
            break;
        case 'f':
            if (!strcmp(ap->name, "flags"))
                parm = G_PART_PARM_FLAGS;
            else if (!strcmp(ap->name, "force"))
                parm = G_PART_PARM_FORCE;
            break;
        case 'i':
            if (!strcmp(ap->name, "index"))
                parm = G_PART_PARM_INDEX;
            break;
        case 'l':
            if (!strcmp(ap->name, "label"))
                parm = G_PART_PARM_LABEL;
            break;
        case 'o':
            if (!strcmp(ap->name, "output"))
                parm = G_PART_PARM_OUTPUT;
            break;
        case 's':
            if (!strcmp(ap->name, "scheme"))
                parm = G_PART_PARM_SCHEME;
            else if (!strcmp(ap->name, "size"))
                parm = G_PART_PARM_SIZE;
            else if (!strcmp(ap->name, "start"))
                parm = G_PART_PARM_START;
            break;
        case 't':
            if (!strcmp(ap->name, "type"))
                parm = G_PART_PARM_TYPE;
            break;
        case 'v':
            if (!strcmp(ap->name, "verb"))
                continue;
            else if (!strcmp(ap->name, "version"))
                parm = G_PART_PARM_VERSION;
            break;
        }
        if ((parm & (mparms | oparms)) == 0) {
            gctl_error(req, "%d param '%s'", EINVAL, ap->name);
            return;
        }
        switch (parm) {
        case G_PART_PARM_ATTRIB:
            error = g_part_parm_str(req, ap->name,
                &gpp.gpp_attrib);
            break;
        case G_PART_PARM_BOOTCODE:
            error = g_part_parm_bootcode(req, ap->name,
                &gpp.gpp_codeptr, &gpp.gpp_codesize);
            break;
        case G_PART_PARM_ENTRIES:
            error = g_part_parm_intmax(req, ap->name,
                &gpp.gpp_entries);
            break;
        case G_PART_PARM_FLAGS:
            error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
            break;
        case G_PART_PARM_FORCE:
            error = g_part_parm_uint32(req, ap->name,
                &gpp.gpp_force);
            break;
        case G_PART_PARM_GEOM:
            error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
            break;
        case G_PART_PARM_INDEX:
            error = g_part_parm_intmax(req, ap->name,
                &gpp.gpp_index);
            break;
        case G_PART_PARM_LABEL:
            error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
            break;
        case G_PART_PARM_OUTPUT:
            error = 0;    /* Write-only parameter */
            break;
        case G_PART_PARM_PROVIDER:
            error = g_part_parm_provider(req, ap->name,
                &gpp.gpp_provider);
            break;
        case G_PART_PARM_SCHEME:
            error = g_part_parm_scheme(req, ap->name,
                &gpp.gpp_scheme);
            break;
        case G_PART_PARM_SIZE:
            error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
            break;
        case G_PART_PARM_START:
            error = g_part_parm_quad(req, ap->name,
                &gpp.gpp_start);
            break;
        case G_PART_PARM_TYPE:
            error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
            break;
        case G_PART_PARM_VERSION:
            error = g_part_parm_uint32(req, ap->name,
                &gpp.gpp_version);
            break;
        default:
            error = EDOOFUS;
            gctl_error(req, "%d %s", error, ap->name);
            break;
        }
        if (error != 0) {
            if (error == ENOATTR) {
                gctl_error(req, "%d param '%s'", error,
                    ap->name);
            }
            return;
        }
        gpp.gpp_parms |= parm;
    }
    if ((gpp.gpp_parms & mparms) != mparms) {
        parm = mparms - (gpp.gpp_parms & mparms);
        gctl_error(req, "%d param '%x'", ENOATTR, parm);
        return;
    }

    /* Obtain permissions if possible/necessary. */
    close_on_error = 0;
    table = NULL;
    if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
        table = gpp.gpp_geom->softc;
        if (table != NULL && table->gpt_corrupt &&
            ctlreq != G_PART_CTL_DESTROY &&
            ctlreq != G_PART_CTL_RECOVER) {
            gctl_error(req, "%d table '%s' is corrupt",
                EPERM, gpp.gpp_geom->name);
            return;
        }
        if (table != NULL && !table->gpt_opened) {
            error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
                1, 1, 1);
            if (error) {
                gctl_error(req, "%d geom '%s'", error,
                    gpp.gpp_geom->name);
                return;
            }
            table->gpt_opened = 1;
            close_on_error = 1;
        }
    }

    /* Allow the scheme to check or modify the parameters. */
    if (table != NULL) {
        error = G_PART_PRECHECK(table, ctlreq, &gpp);
        if (error) {
            gctl_error(req, "%d pre-check failed", error);
            goto out;
        }
    } else
        error = EDOOFUS;    /* Prevent bogus uninit. warning. */

    switch (ctlreq) {
    case G_PART_CTL_NONE:
        panic("%s", __func__);
    case G_PART_CTL_ADD:
        error = g_part_ctl_add(req, &gpp);
        break;
    case G_PART_CTL_BOOTCODE:
        error = g_part_ctl_bootcode(req, &gpp);
        break;
    case G_PART_CTL_COMMIT:
        error = g_part_ctl_commit(req, &gpp);
        break;
    case G_PART_CTL_CREATE:
        error = g_part_ctl_create(req, &gpp);
        break;
    case G_PART_CTL_DELETE:
        error = g_part_ctl_delete(req, &gpp);
        break;
    case G_PART_CTL_DESTROY:
        error = g_part_ctl_destroy(req, &gpp);
        break;
    case G_PART_CTL_MODIFY:
        error = g_part_ctl_modify(req, &gpp);
        break;
    case G_PART_CTL_MOVE:
        error = g_part_ctl_move(req, &gpp);
        break;
    case G_PART_CTL_RECOVER:
        error = g_part_ctl_recover(req, &gpp);
        break;
    case G_PART_CTL_RESIZE:
        error = g_part_ctl_resize(req, &gpp);
        break;
    case G_PART_CTL_SET:
        error = g_part_ctl_setunset(req, &gpp, 1);
        break;
    case G_PART_CTL_UNDO:
        error = g_part_ctl_undo(req, &gpp);
        break;
    case G_PART_CTL_UNSET:
        error = g_part_ctl_setunset(req, &gpp, 0);
        break;
    }

    /* Implement automatic commit. */
    if (!error) {
        auto_commit = (modifies &&
            (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
            strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
        if (auto_commit) {
            KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
                __func__));
            error = g_part_ctl_commit(req, &gpp);
        }
    }

out:
    if (error && close_on_error) {
        g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
        table->gpt_opened = 0;
    }
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
    g_topology_assert();

    g_part_wither(gp, EINVAL);
    return (0);
}

static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
    struct g_consumer *cp;
    struct g_geom *gp;
    struct g_part_entry *entry;
    struct g_part_table *table;
    struct root_hold_token *rht;
    int attr, depth;
    int error;

    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
    g_topology_assert();

    /* Skip providers that are already open for writing. */
    if (pp->acw > 0)
        return (NULL);

    /*
     * Create a GEOM with consumer and hook it up to the provider.
     * With that we become part of the topology. Obtain read access
     * to the provider.
     */
    gp = g_new_geomf(mp, "%s", pp->name);
    cp = g_new_consumer(gp);
    error = g_attach(cp, pp);
    if (error == 0)
        error = g_access(cp, 1, 0, 0);
    if (error != 0) {
        if (cp->provider)
            g_detach(cp);
        g_destroy_consumer(cp);
        g_destroy_geom(gp);
        return (NULL);
    }

    rht = root_mount_hold(mp->name);
    g_topology_unlock();

    /*
     * Short-circuit the whole probing process when there's no
     * media present.
     */
    if (pp->mediasize == 0 || pp->sectorsize == 0) {
        error = ENODEV;
        goto fail;
    }

    /* Make sure we can nest and if so, determine our depth. */
    error = g_getattr("PART::isleaf", cp, &attr);
    if (!error && attr) {
        error = ENODEV;
        goto fail;
    }
    error = g_getattr("PART::depth", cp, &attr);
    depth = (!error) ? attr + 1 : 0;

    error = g_part_probe(gp, cp, depth);
    if (error)
        goto fail;

    table = gp->softc;

    /*
     * Synthesize a disk geometry. Some partitioning schemes
     * depend on it and since some file systems need it even
     * when the partition scheme doesn't, we do it here in
     * scheme-independent code.
     */
    g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

    error = G_PART_READ(table, cp);
    if (error)
        goto fail;
    error = g_part_check_integrity(table, cp);
    if (error)
        goto fail;

    g_topology_lock();
    LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
        if (!entry->gpe_internal)
            g_part_new_provider(gp, table, entry);
    }

    root_mount_rel(rht);
    g_access(cp, -1, 0, 0);
    return (gp);

fail:
    g_topology_lock();
    root_mount_rel(rht);
    g_access(cp, -1, 0, 0);
    g_detach(cp);
    g_destroy_consumer(cp);
    g_destroy_geom(gp);
    return (NULL);
}

/*
 * Geom methods.
 */

static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
    struct g_consumer *cp;

    G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
        dw, de));

    cp = LIST_FIRST(&pp->geom->consumer);

    /* We always gain write-exclusive access. */
    return (g_access(cp, dr, dw, dw + de));
}
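
/*
 * E.g. opening a partition read-only becomes plain read access on the
 * underlying disk, while opening it for writing also acquires
 * exclusive access there; the intent is that no other class can write
 * to the disk (and alter the table) while any partition is open for
 * writing.
 */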

static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
    char buf[64];
    struct g_part_entry *entry;
    struct g_part_table *table;

    KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
    table = gp->softc;

    if (indent == NULL) {
        KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
        entry = pp->private;
        if (entry == NULL)
            return;
        sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
            (uintmax_t)entry->gpe_offset,
            G_PART_TYPE(table, entry, buf, sizeof(buf)));
        /*
         * libdisk compatibility quirk - the scheme dumps the
         * slicer name and partition type in a way that is
         * compatible with libdisk. When libdisk is not used
         * anymore, this should go away.
         */
        G_PART_DUMPCONF(table, entry, sb, indent);
    } else if (cp != NULL) {    /* Consumer configuration. */
        KASSERT(pp == NULL, ("%s", __func__));
        /* none */
    } else if (pp != NULL) {    /* Provider configuration. */
        entry = pp->private;
        if (entry == NULL)
            return;
        sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
            (uintmax_t)entry->gpe_start);
        sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
            (uintmax_t)entry->gpe_end);
        sbuf_printf(sb, "%s<index>%u</index>\n", indent,
            entry->gpe_index);
        sbuf_printf(sb, "%s<type>%s</type>\n", indent,
            G_PART_TYPE(table, entry, buf, sizeof(buf)));
        sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
            (uintmax_t)entry->gpe_offset);
        sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
            (uintmax_t)pp->mediasize);
        G_PART_DUMPCONF(table, entry, sb, indent);
    } else {    /* Geom configuration. */
        sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
            table->gpt_scheme->name);
        sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
            table->gpt_entries);
        sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
            (uintmax_t)table->gpt_first);
        sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
            (uintmax_t)table->gpt_last);
        sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
            table->gpt_sectors);
        sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
            table->gpt_heads);
        sbuf_printf(sb, "%s<state>%s</state>\n", indent,
            table->gpt_corrupt ? "CORRUPT" : "OK");
        sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
            table->gpt_opened ? "true" : "false");
        G_PART_DUMPCONF(table, NULL, sb, indent);
    }
}

static void
g_part_orphan(struct g_consumer *cp)
{
    struct g_provider *pp;
    struct g_part_table *table;

    pp = cp->provider;
    KASSERT(pp != NULL, ("%s", __func__));
    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
    g_topology_assert();

    KASSERT(pp->error != 0, ("%s", __func__));
    table = cp->geom->softc;
    if (table != NULL && table->gpt_opened)
        g_access(cp, -1, -1, -1);
    g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

    G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
    g_topology_assert();

    cp->flags |= G_CF_ORPHAN;
    g_part_wither(cp->geom, ENXIO);
}

static void
g_part_start(struct bio *bp)
{
    struct bio *bp2;
    struct g_consumer *cp;
    struct g_geom *gp;
    struct g_part_entry *entry;
    struct g_part_table *table;
    struct g_kerneldump *gkd;
    struct g_provider *pp;
    char buf[64];

    pp = bp->bio_to;
    gp = pp->geom;
    table = gp->softc;
    cp = LIST_FIRST(&gp->consumer);

    G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
        pp->name));

    entry = pp->private;
    if (entry == NULL) {
        g_io_deliver(bp, ENXIO);
        return;
    }

    switch (bp->bio_cmd) {
    case BIO_DELETE:
    case BIO_READ:
    case BIO_WRITE:
        if (bp->bio_offset >= pp->mediasize) {
            g_io_deliver(bp, EIO);
            return;
        }
        bp2 = g_clone_bio(bp);
        if (bp2 == NULL) {
            g_io_deliver(bp, ENOMEM);
            return;
        }
        if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
            bp2->bio_length = pp->mediasize - bp2->bio_offset;
        bp2->bio_done = g_std_done;
        bp2->bio_offset += entry->gpe_offset;
        g_io_request(bp2, cp);
        return;
    case BIO_FLUSH:
        break;
    case BIO_GETATTR:
        if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
            return;
        if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
            return;
        if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
            return;
        if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
            return;
        if (g_handleattr_str(bp, "PART::scheme",
            table->gpt_scheme->name))
            return;
        if (g_handleattr_str(bp, "PART::type",
            G_PART_TYPE(table, entry, buf, sizeof(buf))))
            return;
        if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
            /*
             * Check that the partition is suitable for kernel
             * dumps. Typically only swap partitions should be
             * used. If the request comes from the nested scheme
             * we allow dumping there as well.
             */
2142 */ 2143 if ((bp->bio_from == NULL || 2144 bp->bio_from->geom->class != &g_part_class) && 2145 G_PART_DUMPTO(table, entry) == 0) { 2146 g_io_deliver(bp, ENODEV); 2147 printf("GEOM_PART: Partition '%s' not suitable" 2148 " for kernel dumps (wrong type?)\n", 2149 pp->name); 2150 return; 2151 } 2152 gkd = (struct g_kerneldump *)bp->bio_data; 2153 if (gkd->offset >= pp->mediasize) { 2154 g_io_deliver(bp, EIO); 2155 return; 2156 } 2157 if (gkd->offset + gkd->length > pp->mediasize) 2158 gkd->length = pp->mediasize - gkd->offset; 2159 gkd->offset += entry->gpe_offset; 2160 } 2161 break; 2162 default: 2163 g_io_deliver(bp, EOPNOTSUPP); 2164 return; 2165 } 2166 2167 bp2 = g_clone_bio(bp); 2168 if (bp2 == NULL) { 2169 g_io_deliver(bp, ENOMEM); 2170 return; 2171 } 2172 bp2->bio_done = g_std_done; 2173 g_io_request(bp2, cp); 2174 } 2175 2176 static void 2177 g_part_init(struct g_class *mp) 2178 { 2179 2180 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2181 } 2182 2183 static void 2184 g_part_fini(struct g_class *mp) 2185 { 2186 2187 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2188 } 2189 2190 static void 2191 g_part_unload_event(void *arg, int flag) 2192 { 2193 struct g_consumer *cp; 2194 struct g_geom *gp; 2195 struct g_provider *pp; 2196 struct g_part_scheme *scheme; 2197 struct g_part_table *table; 2198 uintptr_t *xchg; 2199 int acc, error; 2200 2201 if (flag == EV_CANCEL) 2202 return; 2203 2204 xchg = arg; 2205 error = 0; 2206 scheme = (void *)(*xchg); 2207 2208 g_topology_assert(); 2209 2210 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2211 table = gp->softc; 2212 if (table->gpt_scheme != scheme) 2213 continue; 2214 2215 acc = 0; 2216 LIST_FOREACH(pp, &gp->provider, provider) 2217 acc += pp->acr + pp->acw + pp->ace; 2218 LIST_FOREACH(cp, &gp->consumer, consumer) 2219 acc += cp->acr + cp->acw + cp->ace; 2220 2221 if (!acc) 2222 g_part_wither(gp, ENOSYS); 2223 else 2224 error = EBUSY; 2225 } 2226 2227 if (!error) 2228 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2229 2230 *xchg = error; 2231 } 2232 2233 int 2234 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2235 { 2236 struct g_part_scheme *iter; 2237 uintptr_t arg; 2238 int error; 2239 2240 error = 0; 2241 switch (type) { 2242 case MOD_LOAD: 2243 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2244 if (scheme == iter) { 2245 printf("GEOM_PART: scheme %s is already " 2246 "registered!\n", scheme->name); 2247 break; 2248 } 2249 } 2250 if (iter == NULL) { 2251 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2252 scheme_list); 2253 g_retaste(&g_part_class); 2254 } 2255 break; 2256 case MOD_UNLOAD: 2257 arg = (uintptr_t)scheme; 2258 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2259 NULL); 2260 if (error == 0) 2261 error = arg; 2262 break; 2263 default: 2264 error = EOPNOTSUPP; 2265 break; 2266 } 2267 2268 return (error); 2269 } 2270