1 /*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/diskmbr.h> 33 #include <sys/endian.h> 34 #include <sys/kernel.h> 35 #include <sys/kobj.h> 36 #include <sys/limits.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/mutex.h> 40 #include <sys/queue.h> 41 #include <sys/sbuf.h> 42 #include <sys/sysctl.h> 43 #include <sys/systm.h> 44 #include <sys/uuid.h> 45 #include <geom/geom.h> 46 #include <geom/geom_ctl.h> 47 #include <geom/geom_int.h> 48 #include <geom/part/g_part.h> 49 50 #include "g_part_if.h" 51 52 #ifndef _PATH_DEV 53 #define _PATH_DEV "/dev/" 54 #endif 55 56 static kobj_method_t g_part_null_methods[] = { 57 { 0, 0 } 58 }; 59 60 static struct g_part_scheme g_part_null_scheme = { 61 "(none)", 62 g_part_null_methods, 63 sizeof(struct g_part_table), 64 }; 65 66 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 67 TAILQ_HEAD_INITIALIZER(g_part_schemes); 68 69 struct g_part_alias_list { 70 const char *lexeme; 71 enum g_part_alias alias; 72 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 73 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 74 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 75 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 76 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 77 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 78 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 79 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 81 { "ebr", G_PART_ALIAS_EBR }, 82 { "efi", G_PART_ALIAS_EFI }, 83 { "fat16", G_PART_ALIAS_MS_FAT16 }, 84 { "fat32", G_PART_ALIAS_MS_FAT32 }, 85 { "freebsd", G_PART_ALIAS_FREEBSD }, 86 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 87 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS }, 88 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 89 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 90 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 91 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 92 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 93 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 94 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 95 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 96 { "mbr", G_PART_ALIAS_MBR }, 97 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 98 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 99 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 100 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 101 { "ntfs", G_PART_ALIAS_MS_NTFS }, 102 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 103 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 104 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 105 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 106 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 107 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 108 { "vmware-vmfs", G_PART_ALIAS_VMFS }, 109 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG }, 110 { "vmware-reserved", G_PART_ALIAS_VMRESERVED }, 111 }; 112 113 SYSCTL_DECL(_kern_geom); 114 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, 115 "GEOM_PART stuff"); 116 static u_int check_integrity = 1; 117 TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity); 118 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 119 CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1, 120 "Enable integrity checking"); 121 122 /* 123 * The GEOM partitioning class. 124 */ 125 static g_ctl_req_t g_part_ctlreq; 126 static g_ctl_destroy_geom_t g_part_destroy_geom; 127 static g_fini_t g_part_fini; 128 static g_init_t g_part_init; 129 static g_taste_t g_part_taste; 130 131 static g_access_t g_part_access; 132 static g_dumpconf_t g_part_dumpconf; 133 static g_orphan_t g_part_orphan; 134 static g_spoiled_t g_part_spoiled; 135 static g_start_t g_part_start; 136 137 static struct g_class g_part_class = { 138 .name = "PART", 139 .version = G_VERSION, 140 /* Class methods. */ 141 .ctlreq = g_part_ctlreq, 142 .destroy_geom = g_part_destroy_geom, 143 .fini = g_part_fini, 144 .init = g_part_init, 145 .taste = g_part_taste, 146 /* Geom methods. */ 147 .access = g_part_access, 148 .dumpconf = g_part_dumpconf, 149 .orphan = g_part_orphan, 150 .spoiled = g_part_spoiled, 151 .start = g_part_start, 152 }; 153 154 DECLARE_GEOM_CLASS(g_part_class, g_part); 155 MODULE_VERSION(g_part, 0); 156 157 /* 158 * Support functions. 159 */ 160 161 static void g_part_wither(struct g_geom *, int); 162 163 const char * 164 g_part_alias_name(enum g_part_alias alias) 165 { 166 int i; 167 168 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 169 if (g_part_alias_list[i].alias != alias) 170 continue; 171 return (g_part_alias_list[i].lexeme); 172 } 173 174 return (NULL); 175 } 176 177 void 178 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 179 u_int *bestheads) 180 { 181 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 182 off_t chs, cylinders; 183 u_int heads; 184 int idx; 185 186 *bestchs = 0; 187 *bestheads = 0; 188 for (idx = 0; candidate_heads[idx] != 0; idx++) { 189 heads = candidate_heads[idx]; 190 cylinders = blocks / heads / sectors; 191 if (cylinders < heads || cylinders < sectors) 192 break; 193 if (cylinders > 1023) 194 continue; 195 chs = cylinders * heads * sectors; 196 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 197 *bestchs = chs; 198 *bestheads = heads; 199 } 200 } 201 } 202 203 static void 204 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 205 off_t blocks) 206 { 207 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 208 off_t chs, bestchs; 209 u_int heads, sectors; 210 int idx; 211 212 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 213 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 214 table->gpt_fixgeom = 0; 215 table->gpt_heads = 0; 216 table->gpt_sectors = 0; 217 bestchs = 0; 218 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 219 sectors = candidate_sectors[idx]; 220 g_part_geometry_heads(blocks, sectors, &chs, &heads); 221 if (chs == 0) 222 continue; 223 /* 224 * Prefer a geometry with sectors > 1, but only if 225 * it doesn't bump down the number of heads to 1. 226 */ 227 if (chs > bestchs || (chs == bestchs && heads > 1 && 228 table->gpt_sectors == 1)) { 229 bestchs = chs; 230 table->gpt_heads = heads; 231 table->gpt_sectors = sectors; 232 } 233 } 234 /* 235 * If we didn't find a geometry at all, then the disk is 236 * too big. This means we can use the maximum number of 237 * heads and sectors. 238 */ 239 if (bestchs == 0) { 240 table->gpt_heads = 255; 241 table->gpt_sectors = 63; 242 } 243 } else { 244 table->gpt_fixgeom = 1; 245 table->gpt_heads = heads; 246 table->gpt_sectors = sectors; 247 } 248 } 249 250 #define DPRINTF(...) if (bootverbose) { \ 251 printf("GEOM_PART: " __VA_ARGS__); \ 252 } 253 254 static int 255 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 256 { 257 struct g_part_entry *e1, *e2; 258 struct g_provider *pp; 259 off_t offset; 260 int failed; 261 262 failed = 0; 263 pp = cp->provider; 264 if (table->gpt_last < table->gpt_first) { 265 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 266 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 267 failed++; 268 } 269 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 270 DPRINTF("last LBA extends beyond mediasize: " 271 "%jd > %jd\n", (intmax_t)table->gpt_last, 272 (intmax_t)pp->mediasize / pp->sectorsize - 1); 273 failed++; 274 } 275 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 276 if (e1->gpe_deleted || e1->gpe_internal) 277 continue; 278 if (e1->gpe_start < table->gpt_first) { 279 DPRINTF("partition %d has start offset below first " 280 "LBA: %jd < %jd\n", e1->gpe_index, 281 (intmax_t)e1->gpe_start, 282 (intmax_t)table->gpt_first); 283 failed++; 284 } 285 if (e1->gpe_start > table->gpt_last) { 286 DPRINTF("partition %d has start offset beyond last " 287 "LBA: %jd > %jd\n", e1->gpe_index, 288 (intmax_t)e1->gpe_start, 289 (intmax_t)table->gpt_last); 290 failed++; 291 } 292 if (e1->gpe_end < e1->gpe_start) { 293 DPRINTF("partition %d has end offset below start " 294 "offset: %jd < %jd\n", e1->gpe_index, 295 (intmax_t)e1->gpe_end, 296 (intmax_t)e1->gpe_start); 297 failed++; 298 } 299 if (e1->gpe_end > table->gpt_last) { 300 DPRINTF("partition %d has end offset beyond last " 301 "LBA: %jd > %jd\n", e1->gpe_index, 302 (intmax_t)e1->gpe_end, 303 (intmax_t)table->gpt_last); 304 failed++; 305 } 306 if (pp->stripesize > 0) { 307 offset = e1->gpe_start * pp->sectorsize; 308 if (e1->gpe_offset > offset) 309 offset = e1->gpe_offset; 310 if ((offset + pp->stripeoffset) % pp->stripesize) { 311 DPRINTF("partition %d is not aligned on %u " 312 "bytes\n", e1->gpe_index, pp->stripesize); 313 /* Don't treat this as a critical failure */ 314 } 315 } 316 e2 = e1; 317 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 318 if (e2->gpe_deleted || e2->gpe_internal) 319 continue; 320 if (e1->gpe_start >= e2->gpe_start && 321 e1->gpe_start <= e2->gpe_end) { 322 DPRINTF("partition %d has start offset inside " 323 "partition %d: start[%d] %jd >= start[%d] " 324 "%jd <= end[%d] %jd\n", 325 e1->gpe_index, e2->gpe_index, 326 e2->gpe_index, (intmax_t)e2->gpe_start, 327 e1->gpe_index, (intmax_t)e1->gpe_start, 328 e2->gpe_index, (intmax_t)e2->gpe_end); 329 failed++; 330 } 331 if (e1->gpe_end >= e2->gpe_start && 332 e1->gpe_end <= e2->gpe_end) { 333 DPRINTF("partition %d has end offset inside " 334 "partition %d: start[%d] %jd >= end[%d] " 335 "%jd <= end[%d] %jd\n", 336 e1->gpe_index, e2->gpe_index, 337 e2->gpe_index, (intmax_t)e2->gpe_start, 338 e1->gpe_index, (intmax_t)e1->gpe_end, 339 e2->gpe_index, (intmax_t)e2->gpe_end); 340 failed++; 341 } 342 if (e1->gpe_start < e2->gpe_start && 343 e1->gpe_end > e2->gpe_end) { 344 DPRINTF("partition %d contains partition %d: " 345 "start[%d] %jd > start[%d] %jd, end[%d] " 346 "%jd < end[%d] %jd\n", 347 e1->gpe_index, e2->gpe_index, 348 e1->gpe_index, (intmax_t)e1->gpe_start, 349 e2->gpe_index, (intmax_t)e2->gpe_start, 350 e2->gpe_index, (intmax_t)e2->gpe_end, 351 e1->gpe_index, (intmax_t)e1->gpe_end); 352 failed++; 353 } 354 } 355 } 356 if (failed != 0) { 357 printf("GEOM_PART: integrity check failed (%s, %s)\n", 358 pp->name, table->gpt_scheme->name); 359 if (check_integrity != 0) 360 return (EINVAL); 361 table->gpt_corrupt = 1; 362 } 363 return (0); 364 } 365 #undef DPRINTF 366 367 struct g_part_entry * 368 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 369 quad_t end) 370 { 371 struct g_part_entry *entry, *last; 372 373 last = NULL; 374 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 375 if (entry->gpe_index == index) 376 break; 377 if (entry->gpe_index > index) { 378 entry = NULL; 379 break; 380 } 381 last = entry; 382 } 383 if (entry == NULL) { 384 entry = g_malloc(table->gpt_scheme->gps_entrysz, 385 M_WAITOK | M_ZERO); 386 entry->gpe_index = index; 387 if (last == NULL) 388 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 389 else 390 LIST_INSERT_AFTER(last, entry, gpe_entry); 391 } else 392 entry->gpe_offset = 0; 393 entry->gpe_start = start; 394 entry->gpe_end = end; 395 return (entry); 396 } 397 398 static void 399 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 400 struct g_part_entry *entry) 401 { 402 struct g_consumer *cp; 403 struct g_provider *pp; 404 struct sbuf *sb; 405 off_t offset; 406 407 cp = LIST_FIRST(&gp->consumer); 408 pp = cp->provider; 409 410 offset = entry->gpe_start * pp->sectorsize; 411 if (entry->gpe_offset < offset) 412 entry->gpe_offset = offset; 413 414 if (entry->gpe_pp == NULL) { 415 sb = sbuf_new_auto(); 416 G_PART_FULLNAME(table, entry, sb, gp->name); 417 sbuf_finish(sb); 418 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 419 sbuf_delete(sb); 420 entry->gpe_pp->private = entry; /* Close the circle. */ 421 } 422 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 423 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 424 pp->sectorsize; 425 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 426 entry->gpe_pp->sectorsize = pp->sectorsize; 427 entry->gpe_pp->stripesize = pp->stripesize; 428 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 429 if (pp->stripesize > 0) 430 entry->gpe_pp->stripeoffset %= pp->stripesize; 431 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED; 432 g_error_provider(entry->gpe_pp, 0); 433 } 434 435 static struct g_geom* 436 g_part_find_geom(const char *name) 437 { 438 struct g_geom *gp; 439 LIST_FOREACH(gp, &g_part_class.geom, geom) { 440 if (!strcmp(name, gp->name)) 441 break; 442 } 443 return (gp); 444 } 445 446 static int 447 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 448 { 449 struct g_geom *gp; 450 const char *gname; 451 452 gname = gctl_get_asciiparam(req, name); 453 if (gname == NULL) 454 return (ENOATTR); 455 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 456 gname += sizeof(_PATH_DEV) - 1; 457 gp = g_part_find_geom(gname); 458 if (gp == NULL) { 459 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 460 return (EINVAL); 461 } 462 if ((gp->flags & G_GEOM_WITHER) != 0) { 463 gctl_error(req, "%d %s", ENXIO, gname); 464 return (ENXIO); 465 } 466 *v = gp; 467 return (0); 468 } 469 470 static int 471 g_part_parm_provider(struct gctl_req *req, const char *name, 472 struct g_provider **v) 473 { 474 struct g_provider *pp; 475 const char *pname; 476 477 pname = gctl_get_asciiparam(req, name); 478 if (pname == NULL) 479 return (ENOATTR); 480 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 481 pname += sizeof(_PATH_DEV) - 1; 482 pp = g_provider_by_name(pname); 483 if (pp == NULL) { 484 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 485 return (EINVAL); 486 } 487 *v = pp; 488 return (0); 489 } 490 491 static int 492 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 493 { 494 const char *p; 495 char *x; 496 quad_t q; 497 498 p = gctl_get_asciiparam(req, name); 499 if (p == NULL) 500 return (ENOATTR); 501 q = strtoq(p, &x, 0); 502 if (*x != '\0' || q < 0) { 503 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 504 return (EINVAL); 505 } 506 *v = q; 507 return (0); 508 } 509 510 static int 511 g_part_parm_scheme(struct gctl_req *req, const char *name, 512 struct g_part_scheme **v) 513 { 514 struct g_part_scheme *s; 515 const char *p; 516 517 p = gctl_get_asciiparam(req, name); 518 if (p == NULL) 519 return (ENOATTR); 520 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 521 if (s == &g_part_null_scheme) 522 continue; 523 if (!strcasecmp(s->name, p)) 524 break; 525 } 526 if (s == NULL) { 527 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 528 return (EINVAL); 529 } 530 *v = s; 531 return (0); 532 } 533 534 static int 535 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 536 { 537 const char *p; 538 539 p = gctl_get_asciiparam(req, name); 540 if (p == NULL) 541 return (ENOATTR); 542 /* An empty label is always valid. */ 543 if (strcmp(name, "label") != 0 && p[0] == '\0') { 544 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 545 return (EINVAL); 546 } 547 *v = p; 548 return (0); 549 } 550 551 static int 552 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 553 { 554 const intmax_t *p; 555 int size; 556 557 p = gctl_get_param(req, name, &size); 558 if (p == NULL) 559 return (ENOATTR); 560 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 561 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 562 return (EINVAL); 563 } 564 *v = (u_int)*p; 565 return (0); 566 } 567 568 static int 569 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 570 { 571 const uint32_t *p; 572 int size; 573 574 p = gctl_get_param(req, name, &size); 575 if (p == NULL) 576 return (ENOATTR); 577 if (size != sizeof(*p) || *p > INT_MAX) { 578 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 579 return (EINVAL); 580 } 581 *v = (u_int)*p; 582 return (0); 583 } 584 585 static int 586 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 587 unsigned int *s) 588 { 589 const void *p; 590 int size; 591 592 p = gctl_get_param(req, name, &size); 593 if (p == NULL) 594 return (ENOATTR); 595 *v = p; 596 *s = size; 597 return (0); 598 } 599 600 static int 601 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 602 { 603 struct g_part_scheme *iter, *scheme; 604 struct g_part_table *table; 605 int pri, probe; 606 607 table = gp->softc; 608 scheme = (table != NULL) ? table->gpt_scheme : NULL; 609 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 610 if (pri == 0) 611 goto done; 612 if (pri > 0) { /* error */ 613 scheme = NULL; 614 pri = INT_MIN; 615 } 616 617 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 618 if (iter == &g_part_null_scheme) 619 continue; 620 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 621 M_WAITOK); 622 table->gpt_gp = gp; 623 table->gpt_scheme = iter; 624 table->gpt_depth = depth; 625 probe = G_PART_PROBE(table, cp); 626 if (probe <= 0 && probe > pri) { 627 pri = probe; 628 scheme = iter; 629 if (gp->softc != NULL) 630 kobj_delete((kobj_t)gp->softc, M_GEOM); 631 gp->softc = table; 632 if (pri == 0) 633 goto done; 634 } else 635 kobj_delete((kobj_t)table, M_GEOM); 636 } 637 638 done: 639 return ((scheme == NULL) ? ENXIO : 0); 640 } 641 642 /* 643 * Control request functions. 644 */ 645 646 static int 647 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 648 { 649 struct g_geom *gp; 650 struct g_provider *pp; 651 struct g_part_entry *delent, *last, *entry; 652 struct g_part_table *table; 653 struct sbuf *sb; 654 quad_t end; 655 unsigned int index; 656 int error; 657 658 gp = gpp->gpp_geom; 659 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 660 g_topology_assert(); 661 662 pp = LIST_FIRST(&gp->consumer)->provider; 663 table = gp->softc; 664 end = gpp->gpp_start + gpp->gpp_size - 1; 665 666 if (gpp->gpp_start < table->gpt_first || 667 gpp->gpp_start > table->gpt_last) { 668 gctl_error(req, "%d start '%jd'", EINVAL, 669 (intmax_t)gpp->gpp_start); 670 return (EINVAL); 671 } 672 if (end < gpp->gpp_start || end > table->gpt_last) { 673 gctl_error(req, "%d size '%jd'", EINVAL, 674 (intmax_t)gpp->gpp_size); 675 return (EINVAL); 676 } 677 if (gpp->gpp_index > table->gpt_entries) { 678 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 679 return (EINVAL); 680 } 681 682 delent = last = NULL; 683 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 684 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 685 if (entry->gpe_deleted) { 686 if (entry->gpe_index == index) 687 delent = entry; 688 continue; 689 } 690 if (entry->gpe_index == index) 691 index = entry->gpe_index + 1; 692 if (entry->gpe_index < index) 693 last = entry; 694 if (entry->gpe_internal) 695 continue; 696 if (gpp->gpp_start >= entry->gpe_start && 697 gpp->gpp_start <= entry->gpe_end) { 698 gctl_error(req, "%d start '%jd'", ENOSPC, 699 (intmax_t)gpp->gpp_start); 700 return (ENOSPC); 701 } 702 if (end >= entry->gpe_start && end <= entry->gpe_end) { 703 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 704 return (ENOSPC); 705 } 706 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 707 gctl_error(req, "%d size '%jd'", ENOSPC, 708 (intmax_t)gpp->gpp_size); 709 return (ENOSPC); 710 } 711 } 712 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 713 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 714 return (EEXIST); 715 } 716 if (index > table->gpt_entries) { 717 gctl_error(req, "%d index '%d'", ENOSPC, index); 718 return (ENOSPC); 719 } 720 721 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 722 M_WAITOK | M_ZERO) : delent; 723 entry->gpe_index = index; 724 entry->gpe_start = gpp->gpp_start; 725 entry->gpe_end = end; 726 error = G_PART_ADD(table, entry, gpp); 727 if (error) { 728 gctl_error(req, "%d", error); 729 if (delent == NULL) 730 g_free(entry); 731 return (error); 732 } 733 if (delent == NULL) { 734 if (last == NULL) 735 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 736 else 737 LIST_INSERT_AFTER(last, entry, gpe_entry); 738 entry->gpe_created = 1; 739 } else { 740 entry->gpe_deleted = 0; 741 entry->gpe_modified = 1; 742 } 743 g_part_new_provider(gp, table, entry); 744 745 /* Provide feedback if so requested. */ 746 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 747 sb = sbuf_new_auto(); 748 G_PART_FULLNAME(table, entry, sb, gp->name); 749 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 750 sbuf_printf(sb, " added, but partition is not " 751 "aligned on %u bytes\n", pp->stripesize); 752 else 753 sbuf_cat(sb, " added\n"); 754 sbuf_finish(sb); 755 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 756 sbuf_delete(sb); 757 } 758 return (0); 759 } 760 761 static int 762 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 763 { 764 struct g_geom *gp; 765 struct g_part_table *table; 766 struct sbuf *sb; 767 int error, sz; 768 769 gp = gpp->gpp_geom; 770 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 771 g_topology_assert(); 772 773 table = gp->softc; 774 sz = table->gpt_scheme->gps_bootcodesz; 775 if (sz == 0) { 776 error = ENODEV; 777 goto fail; 778 } 779 if (gpp->gpp_codesize > sz) { 780 error = EFBIG; 781 goto fail; 782 } 783 784 error = G_PART_BOOTCODE(table, gpp); 785 if (error) 786 goto fail; 787 788 /* Provide feedback if so requested. */ 789 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 790 sb = sbuf_new_auto(); 791 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 792 sbuf_finish(sb); 793 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 794 sbuf_delete(sb); 795 } 796 return (0); 797 798 fail: 799 gctl_error(req, "%d", error); 800 return (error); 801 } 802 803 static int 804 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 805 { 806 struct g_consumer *cp; 807 struct g_geom *gp; 808 struct g_provider *pp; 809 struct g_part_entry *entry, *tmp; 810 struct g_part_table *table; 811 char *buf; 812 int error, i; 813 814 gp = gpp->gpp_geom; 815 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 816 g_topology_assert(); 817 818 table = gp->softc; 819 if (!table->gpt_opened) { 820 gctl_error(req, "%d", EPERM); 821 return (EPERM); 822 } 823 824 g_topology_unlock(); 825 826 cp = LIST_FIRST(&gp->consumer); 827 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 828 pp = cp->provider; 829 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 830 while (table->gpt_smhead != 0) { 831 i = ffs(table->gpt_smhead) - 1; 832 error = g_write_data(cp, i * pp->sectorsize, buf, 833 pp->sectorsize); 834 if (error) { 835 g_free(buf); 836 goto fail; 837 } 838 table->gpt_smhead &= ~(1 << i); 839 } 840 while (table->gpt_smtail != 0) { 841 i = ffs(table->gpt_smtail) - 1; 842 error = g_write_data(cp, pp->mediasize - (i + 1) * 843 pp->sectorsize, buf, pp->sectorsize); 844 if (error) { 845 g_free(buf); 846 goto fail; 847 } 848 table->gpt_smtail &= ~(1 << i); 849 } 850 g_free(buf); 851 } 852 853 if (table->gpt_scheme == &g_part_null_scheme) { 854 g_topology_lock(); 855 g_access(cp, -1, -1, -1); 856 g_part_wither(gp, ENXIO); 857 return (0); 858 } 859 860 error = G_PART_WRITE(table, cp); 861 if (error) 862 goto fail; 863 864 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 865 if (!entry->gpe_deleted) { 866 entry->gpe_created = 0; 867 entry->gpe_modified = 0; 868 continue; 869 } 870 LIST_REMOVE(entry, gpe_entry); 871 g_free(entry); 872 } 873 table->gpt_created = 0; 874 table->gpt_opened = 0; 875 876 g_topology_lock(); 877 g_access(cp, -1, -1, -1); 878 return (0); 879 880 fail: 881 g_topology_lock(); 882 gctl_error(req, "%d", error); 883 return (error); 884 } 885 886 static int 887 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 888 { 889 struct g_consumer *cp; 890 struct g_geom *gp; 891 struct g_provider *pp; 892 struct g_part_scheme *scheme; 893 struct g_part_table *null, *table; 894 struct sbuf *sb; 895 int attr, error; 896 897 pp = gpp->gpp_provider; 898 scheme = gpp->gpp_scheme; 899 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 900 g_topology_assert(); 901 902 /* Check that there isn't already a g_part geom on the provider. */ 903 gp = g_part_find_geom(pp->name); 904 if (gp != NULL) { 905 null = gp->softc; 906 if (null->gpt_scheme != &g_part_null_scheme) { 907 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 908 return (EEXIST); 909 } 910 } else 911 null = NULL; 912 913 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 914 (gpp->gpp_entries < scheme->gps_minent || 915 gpp->gpp_entries > scheme->gps_maxent)) { 916 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 917 return (EINVAL); 918 } 919 920 if (null == NULL) 921 gp = g_new_geomf(&g_part_class, "%s", pp->name); 922 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 923 M_WAITOK); 924 table = gp->softc; 925 table->gpt_gp = gp; 926 table->gpt_scheme = gpp->gpp_scheme; 927 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 928 gpp->gpp_entries : scheme->gps_minent; 929 LIST_INIT(&table->gpt_entry); 930 if (null == NULL) { 931 cp = g_new_consumer(gp); 932 error = g_attach(cp, pp); 933 if (error == 0) 934 error = g_access(cp, 1, 1, 1); 935 if (error != 0) { 936 g_part_wither(gp, error); 937 gctl_error(req, "%d geom '%s'", error, pp->name); 938 return (error); 939 } 940 table->gpt_opened = 1; 941 } else { 942 cp = LIST_FIRST(&gp->consumer); 943 table->gpt_opened = null->gpt_opened; 944 table->gpt_smhead = null->gpt_smhead; 945 table->gpt_smtail = null->gpt_smtail; 946 } 947 948 g_topology_unlock(); 949 950 /* Make sure the provider has media. */ 951 if (pp->mediasize == 0 || pp->sectorsize == 0) { 952 error = ENODEV; 953 goto fail; 954 } 955 956 /* Make sure we can nest and if so, determine our depth. */ 957 error = g_getattr("PART::isleaf", cp, &attr); 958 if (!error && attr) { 959 error = ENODEV; 960 goto fail; 961 } 962 error = g_getattr("PART::depth", cp, &attr); 963 table->gpt_depth = (!error) ? attr + 1 : 0; 964 965 /* 966 * Synthesize a disk geometry. Some partitioning schemes 967 * depend on it and since some file systems need it even 968 * when the partitition scheme doesn't, we do it here in 969 * scheme-independent code. 970 */ 971 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 972 973 error = G_PART_CREATE(table, gpp); 974 if (error) 975 goto fail; 976 977 g_topology_lock(); 978 979 table->gpt_created = 1; 980 if (null != NULL) 981 kobj_delete((kobj_t)null, M_GEOM); 982 983 /* 984 * Support automatic commit by filling in the gpp_geom 985 * parameter. 986 */ 987 gpp->gpp_parms |= G_PART_PARM_GEOM; 988 gpp->gpp_geom = gp; 989 990 /* Provide feedback if so requested. */ 991 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 992 sb = sbuf_new_auto(); 993 sbuf_printf(sb, "%s created\n", gp->name); 994 sbuf_finish(sb); 995 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 996 sbuf_delete(sb); 997 } 998 return (0); 999 1000 fail: 1001 g_topology_lock(); 1002 if (null == NULL) { 1003 g_access(cp, -1, -1, -1); 1004 g_part_wither(gp, error); 1005 } else { 1006 kobj_delete((kobj_t)gp->softc, M_GEOM); 1007 gp->softc = null; 1008 } 1009 gctl_error(req, "%d provider", error); 1010 return (error); 1011 } 1012 1013 static int 1014 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1015 { 1016 struct g_geom *gp; 1017 struct g_provider *pp; 1018 struct g_part_entry *entry; 1019 struct g_part_table *table; 1020 struct sbuf *sb; 1021 1022 gp = gpp->gpp_geom; 1023 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1024 g_topology_assert(); 1025 1026 table = gp->softc; 1027 1028 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1029 if (entry->gpe_deleted || entry->gpe_internal) 1030 continue; 1031 if (entry->gpe_index == gpp->gpp_index) 1032 break; 1033 } 1034 if (entry == NULL) { 1035 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1036 return (ENOENT); 1037 } 1038 1039 pp = entry->gpe_pp; 1040 if (pp != NULL) { 1041 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1042 gctl_error(req, "%d", EBUSY); 1043 return (EBUSY); 1044 } 1045 1046 pp->private = NULL; 1047 entry->gpe_pp = NULL; 1048 } 1049 1050 if (pp != NULL) 1051 g_wither_provider(pp, ENXIO); 1052 1053 /* Provide feedback if so requested. */ 1054 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1055 sb = sbuf_new_auto(); 1056 G_PART_FULLNAME(table, entry, sb, gp->name); 1057 sbuf_cat(sb, " deleted\n"); 1058 sbuf_finish(sb); 1059 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1060 sbuf_delete(sb); 1061 } 1062 1063 if (entry->gpe_created) { 1064 LIST_REMOVE(entry, gpe_entry); 1065 g_free(entry); 1066 } else { 1067 entry->gpe_modified = 0; 1068 entry->gpe_deleted = 1; 1069 } 1070 return (0); 1071 } 1072 1073 static int 1074 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1075 { 1076 struct g_consumer *cp; 1077 struct g_geom *gp; 1078 struct g_provider *pp; 1079 struct g_part_entry *entry, *tmp; 1080 struct g_part_table *null, *table; 1081 struct sbuf *sb; 1082 int error; 1083 1084 gp = gpp->gpp_geom; 1085 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1086 g_topology_assert(); 1087 1088 table = gp->softc; 1089 /* Check for busy providers. */ 1090 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1091 if (entry->gpe_deleted || entry->gpe_internal) 1092 continue; 1093 if (gpp->gpp_force) { 1094 pp = entry->gpe_pp; 1095 if (pp == NULL) 1096 continue; 1097 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1098 continue; 1099 } 1100 gctl_error(req, "%d", EBUSY); 1101 return (EBUSY); 1102 } 1103 1104 if (gpp->gpp_force) { 1105 /* Destroy all providers. */ 1106 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1107 pp = entry->gpe_pp; 1108 if (pp != NULL) { 1109 pp->private = NULL; 1110 g_wither_provider(pp, ENXIO); 1111 } 1112 LIST_REMOVE(entry, gpe_entry); 1113 g_free(entry); 1114 } 1115 } 1116 1117 error = G_PART_DESTROY(table, gpp); 1118 if (error) { 1119 gctl_error(req, "%d", error); 1120 return (error); 1121 } 1122 1123 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1124 M_WAITOK); 1125 null = gp->softc; 1126 null->gpt_gp = gp; 1127 null->gpt_scheme = &g_part_null_scheme; 1128 LIST_INIT(&null->gpt_entry); 1129 1130 cp = LIST_FIRST(&gp->consumer); 1131 pp = cp->provider; 1132 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1133 1134 null->gpt_depth = table->gpt_depth; 1135 null->gpt_opened = table->gpt_opened; 1136 null->gpt_smhead = table->gpt_smhead; 1137 null->gpt_smtail = table->gpt_smtail; 1138 1139 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1140 LIST_REMOVE(entry, gpe_entry); 1141 g_free(entry); 1142 } 1143 kobj_delete((kobj_t)table, M_GEOM); 1144 1145 /* Provide feedback if so requested. */ 1146 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1147 sb = sbuf_new_auto(); 1148 sbuf_printf(sb, "%s destroyed\n", gp->name); 1149 sbuf_finish(sb); 1150 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1151 sbuf_delete(sb); 1152 } 1153 return (0); 1154 } 1155 1156 static int 1157 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1158 { 1159 struct g_geom *gp; 1160 struct g_part_entry *entry; 1161 struct g_part_table *table; 1162 struct sbuf *sb; 1163 int error; 1164 1165 gp = gpp->gpp_geom; 1166 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1167 g_topology_assert(); 1168 1169 table = gp->softc; 1170 1171 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1172 if (entry->gpe_deleted || entry->gpe_internal) 1173 continue; 1174 if (entry->gpe_index == gpp->gpp_index) 1175 break; 1176 } 1177 if (entry == NULL) { 1178 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1179 return (ENOENT); 1180 } 1181 1182 error = G_PART_MODIFY(table, entry, gpp); 1183 if (error) { 1184 gctl_error(req, "%d", error); 1185 return (error); 1186 } 1187 1188 if (!entry->gpe_created) 1189 entry->gpe_modified = 1; 1190 1191 /* Provide feedback if so requested. */ 1192 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1193 sb = sbuf_new_auto(); 1194 G_PART_FULLNAME(table, entry, sb, gp->name); 1195 sbuf_cat(sb, " modified\n"); 1196 sbuf_finish(sb); 1197 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1198 sbuf_delete(sb); 1199 } 1200 return (0); 1201 } 1202 1203 static int 1204 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1205 { 1206 gctl_error(req, "%d verb 'move'", ENOSYS); 1207 return (ENOSYS); 1208 } 1209 1210 static int 1211 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1212 { 1213 struct g_part_table *table; 1214 struct g_geom *gp; 1215 struct sbuf *sb; 1216 int error, recovered; 1217 1218 gp = gpp->gpp_geom; 1219 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1220 g_topology_assert(); 1221 table = gp->softc; 1222 error = recovered = 0; 1223 1224 if (table->gpt_corrupt) { 1225 error = G_PART_RECOVER(table); 1226 if (error == 0) 1227 error = g_part_check_integrity(table, 1228 LIST_FIRST(&gp->consumer)); 1229 if (error) { 1230 gctl_error(req, "%d recovering '%s' failed", 1231 error, gp->name); 1232 return (error); 1233 } 1234 recovered = 1; 1235 } 1236 /* Provide feedback if so requested. */ 1237 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1238 sb = sbuf_new_auto(); 1239 if (recovered) 1240 sbuf_printf(sb, "%s recovered\n", gp->name); 1241 else 1242 sbuf_printf(sb, "%s recovering is not needed\n", 1243 gp->name); 1244 sbuf_finish(sb); 1245 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1246 sbuf_delete(sb); 1247 } 1248 return (0); 1249 } 1250 1251 static int 1252 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1253 { 1254 struct g_geom *gp; 1255 struct g_provider *pp; 1256 struct g_part_entry *pe, *entry; 1257 struct g_part_table *table; 1258 struct sbuf *sb; 1259 quad_t end; 1260 int error; 1261 off_t mediasize; 1262 1263 gp = gpp->gpp_geom; 1264 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1265 g_topology_assert(); 1266 table = gp->softc; 1267 1268 /* check gpp_index */ 1269 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1270 if (entry->gpe_deleted || entry->gpe_internal) 1271 continue; 1272 if (entry->gpe_index == gpp->gpp_index) 1273 break; 1274 } 1275 if (entry == NULL) { 1276 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1277 return (ENOENT); 1278 } 1279 1280 /* check gpp_size */ 1281 end = entry->gpe_start + gpp->gpp_size - 1; 1282 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1283 gctl_error(req, "%d size '%jd'", EINVAL, 1284 (intmax_t)gpp->gpp_size); 1285 return (EINVAL); 1286 } 1287 1288 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1289 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1290 continue; 1291 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1292 gctl_error(req, "%d end '%jd'", ENOSPC, 1293 (intmax_t)end); 1294 return (ENOSPC); 1295 } 1296 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1297 gctl_error(req, "%d size '%jd'", ENOSPC, 1298 (intmax_t)gpp->gpp_size); 1299 return (ENOSPC); 1300 } 1301 } 1302 1303 pp = entry->gpe_pp; 1304 if ((g_debugflags & 16) == 0 && 1305 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1306 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) { 1307 /* Deny shrinking of an opened partition. */ 1308 gctl_error(req, "%d", EBUSY); 1309 return (EBUSY); 1310 } 1311 } 1312 1313 error = G_PART_RESIZE(table, entry, gpp); 1314 if (error) { 1315 gctl_error(req, "%d", error); 1316 return (error); 1317 } 1318 1319 if (!entry->gpe_created) 1320 entry->gpe_modified = 1; 1321 1322 /* update mediasize of changed provider */ 1323 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1324 pp->sectorsize; 1325 g_resize_provider(pp, mediasize); 1326 1327 /* Provide feedback if so requested. */ 1328 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1329 sb = sbuf_new_auto(); 1330 G_PART_FULLNAME(table, entry, sb, gp->name); 1331 sbuf_cat(sb, " resized\n"); 1332 sbuf_finish(sb); 1333 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1334 sbuf_delete(sb); 1335 } 1336 return (0); 1337 } 1338 1339 static int 1340 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1341 unsigned int set) 1342 { 1343 struct g_geom *gp; 1344 struct g_part_entry *entry; 1345 struct g_part_table *table; 1346 struct sbuf *sb; 1347 int error; 1348 1349 gp = gpp->gpp_geom; 1350 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1351 g_topology_assert(); 1352 1353 table = gp->softc; 1354 1355 if (gpp->gpp_parms & G_PART_PARM_INDEX) { 1356 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1357 if (entry->gpe_deleted || entry->gpe_internal) 1358 continue; 1359 if (entry->gpe_index == gpp->gpp_index) 1360 break; 1361 } 1362 if (entry == NULL) { 1363 gctl_error(req, "%d index '%d'", ENOENT, 1364 gpp->gpp_index); 1365 return (ENOENT); 1366 } 1367 } else 1368 entry = NULL; 1369 1370 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1371 if (error) { 1372 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1373 return (error); 1374 } 1375 1376 /* Provide feedback if so requested. */ 1377 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1378 sb = sbuf_new_auto(); 1379 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1380 (set) ? "" : "un"); 1381 if (entry) 1382 G_PART_FULLNAME(table, entry, sb, gp->name); 1383 else 1384 sbuf_cat(sb, gp->name); 1385 sbuf_cat(sb, "\n"); 1386 sbuf_finish(sb); 1387 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1388 sbuf_delete(sb); 1389 } 1390 return (0); 1391 } 1392 1393 static int 1394 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1395 { 1396 struct g_consumer *cp; 1397 struct g_provider *pp; 1398 struct g_geom *gp; 1399 struct g_part_entry *entry, *tmp; 1400 struct g_part_table *table; 1401 int error, reprobe; 1402 1403 gp = gpp->gpp_geom; 1404 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1405 g_topology_assert(); 1406 1407 table = gp->softc; 1408 if (!table->gpt_opened) { 1409 gctl_error(req, "%d", EPERM); 1410 return (EPERM); 1411 } 1412 1413 cp = LIST_FIRST(&gp->consumer); 1414 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1415 entry->gpe_modified = 0; 1416 if (entry->gpe_created) { 1417 pp = entry->gpe_pp; 1418 if (pp != NULL) { 1419 pp->private = NULL; 1420 entry->gpe_pp = NULL; 1421 g_wither_provider(pp, ENXIO); 1422 } 1423 entry->gpe_deleted = 1; 1424 } 1425 if (entry->gpe_deleted) { 1426 LIST_REMOVE(entry, gpe_entry); 1427 g_free(entry); 1428 } 1429 } 1430 1431 g_topology_unlock(); 1432 1433 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1434 table->gpt_created) ? 1 : 0; 1435 1436 if (reprobe) { 1437 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1438 if (entry->gpe_internal) 1439 continue; 1440 error = EBUSY; 1441 goto fail; 1442 } 1443 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1444 LIST_REMOVE(entry, gpe_entry); 1445 g_free(entry); 1446 } 1447 error = g_part_probe(gp, cp, table->gpt_depth); 1448 if (error) { 1449 g_topology_lock(); 1450 g_access(cp, -1, -1, -1); 1451 g_part_wither(gp, error); 1452 return (0); 1453 } 1454 table = gp->softc; 1455 1456 /* 1457 * Synthesize a disk geometry. Some partitioning schemes 1458 * depend on it and since some file systems need it even 1459 * when the partitition scheme doesn't, we do it here in 1460 * scheme-independent code. 1461 */ 1462 pp = cp->provider; 1463 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1464 } 1465 1466 error = G_PART_READ(table, cp); 1467 if (error) 1468 goto fail; 1469 error = g_part_check_integrity(table, cp); 1470 if (error) 1471 goto fail; 1472 1473 g_topology_lock(); 1474 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1475 if (!entry->gpe_internal) 1476 g_part_new_provider(gp, table, entry); 1477 } 1478 1479 table->gpt_opened = 0; 1480 g_access(cp, -1, -1, -1); 1481 return (0); 1482 1483 fail: 1484 g_topology_lock(); 1485 gctl_error(req, "%d", error); 1486 return (error); 1487 } 1488 1489 static void 1490 g_part_wither(struct g_geom *gp, int error) 1491 { 1492 struct g_part_entry *entry; 1493 struct g_part_table *table; 1494 1495 table = gp->softc; 1496 if (table != NULL) { 1497 G_PART_DESTROY(table, NULL); 1498 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1499 LIST_REMOVE(entry, gpe_entry); 1500 g_free(entry); 1501 } 1502 if (gp->softc != NULL) { 1503 kobj_delete((kobj_t)gp->softc, M_GEOM); 1504 gp->softc = NULL; 1505 } 1506 } 1507 g_wither_geom(gp, error); 1508 } 1509 1510 /* 1511 * Class methods. 1512 */ 1513 1514 static void 1515 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1516 { 1517 struct g_part_parms gpp; 1518 struct g_part_table *table; 1519 struct gctl_req_arg *ap; 1520 enum g_part_ctl ctlreq; 1521 unsigned int i, mparms, oparms, parm; 1522 int auto_commit, close_on_error; 1523 int error, modifies; 1524 1525 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1526 g_topology_assert(); 1527 1528 ctlreq = G_PART_CTL_NONE; 1529 modifies = 1; 1530 mparms = 0; 1531 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1532 switch (*verb) { 1533 case 'a': 1534 if (!strcmp(verb, "add")) { 1535 ctlreq = G_PART_CTL_ADD; 1536 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1537 G_PART_PARM_START | G_PART_PARM_TYPE; 1538 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1539 } 1540 break; 1541 case 'b': 1542 if (!strcmp(verb, "bootcode")) { 1543 ctlreq = G_PART_CTL_BOOTCODE; 1544 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1545 } 1546 break; 1547 case 'c': 1548 if (!strcmp(verb, "commit")) { 1549 ctlreq = G_PART_CTL_COMMIT; 1550 mparms |= G_PART_PARM_GEOM; 1551 modifies = 0; 1552 } else if (!strcmp(verb, "create")) { 1553 ctlreq = G_PART_CTL_CREATE; 1554 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1555 oparms |= G_PART_PARM_ENTRIES; 1556 } 1557 break; 1558 case 'd': 1559 if (!strcmp(verb, "delete")) { 1560 ctlreq = G_PART_CTL_DELETE; 1561 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1562 } else if (!strcmp(verb, "destroy")) { 1563 ctlreq = G_PART_CTL_DESTROY; 1564 mparms |= G_PART_PARM_GEOM; 1565 oparms |= G_PART_PARM_FORCE; 1566 } 1567 break; 1568 case 'm': 1569 if (!strcmp(verb, "modify")) { 1570 ctlreq = G_PART_CTL_MODIFY; 1571 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1572 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1573 } else if (!strcmp(verb, "move")) { 1574 ctlreq = G_PART_CTL_MOVE; 1575 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1576 } 1577 break; 1578 case 'r': 1579 if (!strcmp(verb, "recover")) { 1580 ctlreq = G_PART_CTL_RECOVER; 1581 mparms |= G_PART_PARM_GEOM; 1582 } else if (!strcmp(verb, "resize")) { 1583 ctlreq = G_PART_CTL_RESIZE; 1584 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1585 G_PART_PARM_SIZE; 1586 } 1587 break; 1588 case 's': 1589 if (!strcmp(verb, "set")) { 1590 ctlreq = G_PART_CTL_SET; 1591 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1592 oparms |= G_PART_PARM_INDEX; 1593 } 1594 break; 1595 case 'u': 1596 if (!strcmp(verb, "undo")) { 1597 ctlreq = G_PART_CTL_UNDO; 1598 mparms |= G_PART_PARM_GEOM; 1599 modifies = 0; 1600 } else if (!strcmp(verb, "unset")) { 1601 ctlreq = G_PART_CTL_UNSET; 1602 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1603 oparms |= G_PART_PARM_INDEX; 1604 } 1605 break; 1606 } 1607 if (ctlreq == G_PART_CTL_NONE) { 1608 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1609 return; 1610 } 1611 1612 bzero(&gpp, sizeof(gpp)); 1613 for (i = 0; i < req->narg; i++) { 1614 ap = &req->arg[i]; 1615 parm = 0; 1616 switch (ap->name[0]) { 1617 case 'a': 1618 if (!strcmp(ap->name, "arg0")) { 1619 parm = mparms & 1620 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1621 } 1622 if (!strcmp(ap->name, "attrib")) 1623 parm = G_PART_PARM_ATTRIB; 1624 break; 1625 case 'b': 1626 if (!strcmp(ap->name, "bootcode")) 1627 parm = G_PART_PARM_BOOTCODE; 1628 break; 1629 case 'c': 1630 if (!strcmp(ap->name, "class")) 1631 continue; 1632 break; 1633 case 'e': 1634 if (!strcmp(ap->name, "entries")) 1635 parm = G_PART_PARM_ENTRIES; 1636 break; 1637 case 'f': 1638 if (!strcmp(ap->name, "flags")) 1639 parm = G_PART_PARM_FLAGS; 1640 else if (!strcmp(ap->name, "force")) 1641 parm = G_PART_PARM_FORCE; 1642 break; 1643 case 'i': 1644 if (!strcmp(ap->name, "index")) 1645 parm = G_PART_PARM_INDEX; 1646 break; 1647 case 'l': 1648 if (!strcmp(ap->name, "label")) 1649 parm = G_PART_PARM_LABEL; 1650 break; 1651 case 'o': 1652 if (!strcmp(ap->name, "output")) 1653 parm = G_PART_PARM_OUTPUT; 1654 break; 1655 case 's': 1656 if (!strcmp(ap->name, "scheme")) 1657 parm = G_PART_PARM_SCHEME; 1658 else if (!strcmp(ap->name, "size")) 1659 parm = G_PART_PARM_SIZE; 1660 else if (!strcmp(ap->name, "start")) 1661 parm = G_PART_PARM_START; 1662 break; 1663 case 't': 1664 if (!strcmp(ap->name, "type")) 1665 parm = G_PART_PARM_TYPE; 1666 break; 1667 case 'v': 1668 if (!strcmp(ap->name, "verb")) 1669 continue; 1670 else if (!strcmp(ap->name, "version")) 1671 parm = G_PART_PARM_VERSION; 1672 break; 1673 } 1674 if ((parm & (mparms | oparms)) == 0) { 1675 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1676 return; 1677 } 1678 switch (parm) { 1679 case G_PART_PARM_ATTRIB: 1680 error = g_part_parm_str(req, ap->name, 1681 &gpp.gpp_attrib); 1682 break; 1683 case G_PART_PARM_BOOTCODE: 1684 error = g_part_parm_bootcode(req, ap->name, 1685 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1686 break; 1687 case G_PART_PARM_ENTRIES: 1688 error = g_part_parm_intmax(req, ap->name, 1689 &gpp.gpp_entries); 1690 break; 1691 case G_PART_PARM_FLAGS: 1692 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1693 break; 1694 case G_PART_PARM_FORCE: 1695 error = g_part_parm_uint32(req, ap->name, 1696 &gpp.gpp_force); 1697 break; 1698 case G_PART_PARM_GEOM: 1699 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1700 break; 1701 case G_PART_PARM_INDEX: 1702 error = g_part_parm_intmax(req, ap->name, 1703 &gpp.gpp_index); 1704 break; 1705 case G_PART_PARM_LABEL: 1706 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1707 break; 1708 case G_PART_PARM_OUTPUT: 1709 error = 0; /* Write-only parameter */ 1710 break; 1711 case G_PART_PARM_PROVIDER: 1712 error = g_part_parm_provider(req, ap->name, 1713 &gpp.gpp_provider); 1714 break; 1715 case G_PART_PARM_SCHEME: 1716 error = g_part_parm_scheme(req, ap->name, 1717 &gpp.gpp_scheme); 1718 break; 1719 case G_PART_PARM_SIZE: 1720 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1721 break; 1722 case G_PART_PARM_START: 1723 error = g_part_parm_quad(req, ap->name, 1724 &gpp.gpp_start); 1725 break; 1726 case G_PART_PARM_TYPE: 1727 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1728 break; 1729 case G_PART_PARM_VERSION: 1730 error = g_part_parm_uint32(req, ap->name, 1731 &gpp.gpp_version); 1732 break; 1733 default: 1734 error = EDOOFUS; 1735 gctl_error(req, "%d %s", error, ap->name); 1736 break; 1737 } 1738 if (error != 0) { 1739 if (error == ENOATTR) { 1740 gctl_error(req, "%d param '%s'", error, 1741 ap->name); 1742 } 1743 return; 1744 } 1745 gpp.gpp_parms |= parm; 1746 } 1747 if ((gpp.gpp_parms & mparms) != mparms) { 1748 parm = mparms - (gpp.gpp_parms & mparms); 1749 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1750 return; 1751 } 1752 1753 /* Obtain permissions if possible/necessary. */ 1754 close_on_error = 0; 1755 table = NULL; 1756 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1757 table = gpp.gpp_geom->softc; 1758 if (table != NULL && table->gpt_corrupt && 1759 ctlreq != G_PART_CTL_DESTROY && 1760 ctlreq != G_PART_CTL_RECOVER) { 1761 gctl_error(req, "%d table '%s' is corrupt", 1762 EPERM, gpp.gpp_geom->name); 1763 return; 1764 } 1765 if (table != NULL && !table->gpt_opened) { 1766 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1767 1, 1, 1); 1768 if (error) { 1769 gctl_error(req, "%d geom '%s'", error, 1770 gpp.gpp_geom->name); 1771 return; 1772 } 1773 table->gpt_opened = 1; 1774 close_on_error = 1; 1775 } 1776 } 1777 1778 /* Allow the scheme to check or modify the parameters. */ 1779 if (table != NULL) { 1780 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1781 if (error) { 1782 gctl_error(req, "%d pre-check failed", error); 1783 goto out; 1784 } 1785 } else 1786 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1787 1788 switch (ctlreq) { 1789 case G_PART_CTL_NONE: 1790 panic("%s", __func__); 1791 case G_PART_CTL_ADD: 1792 error = g_part_ctl_add(req, &gpp); 1793 break; 1794 case G_PART_CTL_BOOTCODE: 1795 error = g_part_ctl_bootcode(req, &gpp); 1796 break; 1797 case G_PART_CTL_COMMIT: 1798 error = g_part_ctl_commit(req, &gpp); 1799 break; 1800 case G_PART_CTL_CREATE: 1801 error = g_part_ctl_create(req, &gpp); 1802 break; 1803 case G_PART_CTL_DELETE: 1804 error = g_part_ctl_delete(req, &gpp); 1805 break; 1806 case G_PART_CTL_DESTROY: 1807 error = g_part_ctl_destroy(req, &gpp); 1808 break; 1809 case G_PART_CTL_MODIFY: 1810 error = g_part_ctl_modify(req, &gpp); 1811 break; 1812 case G_PART_CTL_MOVE: 1813 error = g_part_ctl_move(req, &gpp); 1814 break; 1815 case G_PART_CTL_RECOVER: 1816 error = g_part_ctl_recover(req, &gpp); 1817 break; 1818 case G_PART_CTL_RESIZE: 1819 error = g_part_ctl_resize(req, &gpp); 1820 break; 1821 case G_PART_CTL_SET: 1822 error = g_part_ctl_setunset(req, &gpp, 1); 1823 break; 1824 case G_PART_CTL_UNDO: 1825 error = g_part_ctl_undo(req, &gpp); 1826 break; 1827 case G_PART_CTL_UNSET: 1828 error = g_part_ctl_setunset(req, &gpp, 0); 1829 break; 1830 } 1831 1832 /* Implement automatic commit. */ 1833 if (!error) { 1834 auto_commit = (modifies && 1835 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1836 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1837 if (auto_commit) { 1838 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1839 __func__)); 1840 error = g_part_ctl_commit(req, &gpp); 1841 } 1842 } 1843 1844 out: 1845 if (error && close_on_error) { 1846 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1847 table->gpt_opened = 0; 1848 } 1849 } 1850 1851 static int 1852 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1853 struct g_geom *gp) 1854 { 1855 1856 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1857 g_topology_assert(); 1858 1859 g_part_wither(gp, EINVAL); 1860 return (0); 1861 } 1862 1863 static struct g_geom * 1864 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1865 { 1866 struct g_consumer *cp; 1867 struct g_geom *gp; 1868 struct g_part_entry *entry; 1869 struct g_part_table *table; 1870 struct root_hold_token *rht; 1871 int attr, depth; 1872 int error; 1873 1874 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1875 g_topology_assert(); 1876 1877 /* Skip providers that are already open for writing. */ 1878 if (pp->acw > 0) 1879 return (NULL); 1880 1881 /* 1882 * Create a GEOM with consumer and hook it up to the provider. 1883 * With that we become part of the topology. Optain read access 1884 * to the provider. 1885 */ 1886 gp = g_new_geomf(mp, "%s", pp->name); 1887 cp = g_new_consumer(gp); 1888 error = g_attach(cp, pp); 1889 if (error == 0) 1890 error = g_access(cp, 1, 0, 0); 1891 if (error != 0) { 1892 if (cp->provider) 1893 g_detach(cp); 1894 g_destroy_consumer(cp); 1895 g_destroy_geom(gp); 1896 return (NULL); 1897 } 1898 1899 rht = root_mount_hold(mp->name); 1900 g_topology_unlock(); 1901 1902 /* 1903 * Short-circuit the whole probing galore when there's no 1904 * media present. 1905 */ 1906 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1907 error = ENODEV; 1908 goto fail; 1909 } 1910 1911 /* Make sure we can nest and if so, determine our depth. */ 1912 error = g_getattr("PART::isleaf", cp, &attr); 1913 if (!error && attr) { 1914 error = ENODEV; 1915 goto fail; 1916 } 1917 error = g_getattr("PART::depth", cp, &attr); 1918 depth = (!error) ? attr + 1 : 0; 1919 1920 error = g_part_probe(gp, cp, depth); 1921 if (error) 1922 goto fail; 1923 1924 table = gp->softc; 1925 1926 /* 1927 * Synthesize a disk geometry. Some partitioning schemes 1928 * depend on it and since some file systems need it even 1929 * when the partitition scheme doesn't, we do it here in 1930 * scheme-independent code. 1931 */ 1932 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1933 1934 error = G_PART_READ(table, cp); 1935 if (error) 1936 goto fail; 1937 error = g_part_check_integrity(table, cp); 1938 if (error) 1939 goto fail; 1940 1941 g_topology_lock(); 1942 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1943 if (!entry->gpe_internal) 1944 g_part_new_provider(gp, table, entry); 1945 } 1946 1947 root_mount_rel(rht); 1948 g_access(cp, -1, 0, 0); 1949 return (gp); 1950 1951 fail: 1952 g_topology_lock(); 1953 root_mount_rel(rht); 1954 g_access(cp, -1, 0, 0); 1955 g_detach(cp); 1956 g_destroy_consumer(cp); 1957 g_destroy_geom(gp); 1958 return (NULL); 1959 } 1960 1961 /* 1962 * Geom methods. 1963 */ 1964 1965 static int 1966 g_part_access(struct g_provider *pp, int dr, int dw, int de) 1967 { 1968 struct g_consumer *cp; 1969 1970 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1971 dw, de)); 1972 1973 cp = LIST_FIRST(&pp->geom->consumer); 1974 1975 /* We always gain write-exclusive access. */ 1976 return (g_access(cp, dr, dw, dw + de)); 1977 } 1978 1979 static void 1980 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1981 struct g_consumer *cp, struct g_provider *pp) 1982 { 1983 char buf[64]; 1984 struct g_part_entry *entry; 1985 struct g_part_table *table; 1986 1987 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 1988 table = gp->softc; 1989 1990 if (indent == NULL) { 1991 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 1992 entry = pp->private; 1993 if (entry == NULL) 1994 return; 1995 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 1996 (uintmax_t)entry->gpe_offset, 1997 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1998 /* 1999 * libdisk compatibility quirk - the scheme dumps the 2000 * slicer name and partition type in a way that is 2001 * compatible with libdisk. When libdisk is not used 2002 * anymore, this should go away. 2003 */ 2004 G_PART_DUMPCONF(table, entry, sb, indent); 2005 } else if (cp != NULL) { /* Consumer configuration. */ 2006 KASSERT(pp == NULL, ("%s", __func__)); 2007 /* none */ 2008 } else if (pp != NULL) { /* Provider configuration. */ 2009 entry = pp->private; 2010 if (entry == NULL) 2011 return; 2012 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2013 (uintmax_t)entry->gpe_start); 2014 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2015 (uintmax_t)entry->gpe_end); 2016 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2017 entry->gpe_index); 2018 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2019 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2020 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2021 (uintmax_t)entry->gpe_offset); 2022 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2023 (uintmax_t)pp->mediasize); 2024 G_PART_DUMPCONF(table, entry, sb, indent); 2025 } else { /* Geom configuration. */ 2026 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2027 table->gpt_scheme->name); 2028 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2029 table->gpt_entries); 2030 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2031 (uintmax_t)table->gpt_first); 2032 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2033 (uintmax_t)table->gpt_last); 2034 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2035 table->gpt_sectors); 2036 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2037 table->gpt_heads); 2038 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2039 table->gpt_corrupt ? "CORRUPT": "OK"); 2040 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2041 table->gpt_opened ? "true": "false"); 2042 G_PART_DUMPCONF(table, NULL, sb, indent); 2043 } 2044 } 2045 2046 static void 2047 g_part_orphan(struct g_consumer *cp) 2048 { 2049 struct g_provider *pp; 2050 struct g_part_table *table; 2051 2052 pp = cp->provider; 2053 KASSERT(pp != NULL, ("%s", __func__)); 2054 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2055 g_topology_assert(); 2056 2057 KASSERT(pp->error != 0, ("%s", __func__)); 2058 table = cp->geom->softc; 2059 if (table != NULL && table->gpt_opened) 2060 g_access(cp, -1, -1, -1); 2061 g_part_wither(cp->geom, pp->error); 2062 } 2063 2064 static void 2065 g_part_spoiled(struct g_consumer *cp) 2066 { 2067 2068 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2069 g_topology_assert(); 2070 2071 cp->flags |= G_CF_ORPHAN; 2072 g_part_wither(cp->geom, ENXIO); 2073 } 2074 2075 static void 2076 g_part_start(struct bio *bp) 2077 { 2078 struct bio *bp2; 2079 struct g_consumer *cp; 2080 struct g_geom *gp; 2081 struct g_part_entry *entry; 2082 struct g_part_table *table; 2083 struct g_kerneldump *gkd; 2084 struct g_provider *pp; 2085 char buf[64]; 2086 2087 pp = bp->bio_to; 2088 gp = pp->geom; 2089 table = gp->softc; 2090 cp = LIST_FIRST(&gp->consumer); 2091 2092 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2093 pp->name)); 2094 2095 entry = pp->private; 2096 if (entry == NULL) { 2097 g_io_deliver(bp, ENXIO); 2098 return; 2099 } 2100 2101 switch(bp->bio_cmd) { 2102 case BIO_DELETE: 2103 case BIO_READ: 2104 case BIO_WRITE: 2105 if (bp->bio_offset >= pp->mediasize) { 2106 g_io_deliver(bp, EIO); 2107 return; 2108 } 2109 bp2 = g_clone_bio(bp); 2110 if (bp2 == NULL) { 2111 g_io_deliver(bp, ENOMEM); 2112 return; 2113 } 2114 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2115 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2116 bp2->bio_done = g_std_done; 2117 bp2->bio_offset += entry->gpe_offset; 2118 g_io_request(bp2, cp); 2119 return; 2120 case BIO_FLUSH: 2121 break; 2122 case BIO_GETATTR: 2123 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2124 return; 2125 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2126 return; 2127 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2128 return; 2129 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2130 return; 2131 if (g_handleattr_str(bp, "PART::scheme", 2132 table->gpt_scheme->name)) 2133 return; 2134 if (g_handleattr_str(bp, "PART::type", 2135 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2136 return; 2137 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2138 /* 2139 * Check that the partition is suitable for kernel 2140 * dumps. Typically only swap partitions should be 2141 * used. If the request comes from the nested scheme 2142 * we allow dumping there as well. 2143 */ 2144 if ((bp->bio_from == NULL || 2145 bp->bio_from->geom->class != &g_part_class) && 2146 G_PART_DUMPTO(table, entry) == 0) { 2147 g_io_deliver(bp, ENODEV); 2148 printf("GEOM_PART: Partition '%s' not suitable" 2149 " for kernel dumps (wrong type?)\n", 2150 pp->name); 2151 return; 2152 } 2153 gkd = (struct g_kerneldump *)bp->bio_data; 2154 if (gkd->offset >= pp->mediasize) { 2155 g_io_deliver(bp, EIO); 2156 return; 2157 } 2158 if (gkd->offset + gkd->length > pp->mediasize) 2159 gkd->length = pp->mediasize - gkd->offset; 2160 gkd->offset += entry->gpe_offset; 2161 } 2162 break; 2163 default: 2164 g_io_deliver(bp, EOPNOTSUPP); 2165 return; 2166 } 2167 2168 bp2 = g_clone_bio(bp); 2169 if (bp2 == NULL) { 2170 g_io_deliver(bp, ENOMEM); 2171 return; 2172 } 2173 bp2->bio_done = g_std_done; 2174 g_io_request(bp2, cp); 2175 } 2176 2177 static void 2178 g_part_init(struct g_class *mp) 2179 { 2180 2181 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2182 } 2183 2184 static void 2185 g_part_fini(struct g_class *mp) 2186 { 2187 2188 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2189 } 2190 2191 static void 2192 g_part_unload_event(void *arg, int flag) 2193 { 2194 struct g_consumer *cp; 2195 struct g_geom *gp; 2196 struct g_provider *pp; 2197 struct g_part_scheme *scheme; 2198 struct g_part_table *table; 2199 uintptr_t *xchg; 2200 int acc, error; 2201 2202 if (flag == EV_CANCEL) 2203 return; 2204 2205 xchg = arg; 2206 error = 0; 2207 scheme = (void *)(*xchg); 2208 2209 g_topology_assert(); 2210 2211 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2212 table = gp->softc; 2213 if (table->gpt_scheme != scheme) 2214 continue; 2215 2216 acc = 0; 2217 LIST_FOREACH(pp, &gp->provider, provider) 2218 acc += pp->acr + pp->acw + pp->ace; 2219 LIST_FOREACH(cp, &gp->consumer, consumer) 2220 acc += cp->acr + cp->acw + cp->ace; 2221 2222 if (!acc) 2223 g_part_wither(gp, ENOSYS); 2224 else 2225 error = EBUSY; 2226 } 2227 2228 if (!error) 2229 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2230 2231 *xchg = error; 2232 } 2233 2234 int 2235 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2236 { 2237 struct g_part_scheme *iter; 2238 uintptr_t arg; 2239 int error; 2240 2241 error = 0; 2242 switch (type) { 2243 case MOD_LOAD: 2244 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2245 if (scheme == iter) { 2246 printf("GEOM_PART: scheme %s is already " 2247 "registered!\n", scheme->name); 2248 break; 2249 } 2250 } 2251 if (iter == NULL) { 2252 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2253 scheme_list); 2254 g_retaste(&g_part_class); 2255 } 2256 break; 2257 case MOD_UNLOAD: 2258 arg = (uintptr_t)scheme; 2259 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2260 NULL); 2261 if (error == 0) 2262 error = arg; 2263 break; 2264 default: 2265 error = EOPNOTSUPP; 2266 break; 2267 } 2268 2269 return (error); 2270 } 2271