1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/bio.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/kobj.h> 37 #include <sys/limits.h> 38 #include <sys/lock.h> 39 #include <sys/malloc.h> 40 #include <sys/mutex.h> 41 #include <sys/queue.h> 42 #include <sys/sbuf.h> 43 #include <sys/sysctl.h> 44 #include <sys/systm.h> 45 #include <sys/uuid.h> 46 #include <geom/geom.h> 47 #include <geom/geom_ctl.h> 48 #include <geom/geom_int.h> 49 #include <geom/part/g_part.h> 50 51 #include "g_part_if.h" 52 53 #ifndef _PATH_DEV 54 #define _PATH_DEV "/dev/" 55 #endif 56 57 static kobj_method_t g_part_null_methods[] = { 58 { 0, 0 } 59 }; 60 61 static struct g_part_scheme g_part_null_scheme = { 62 "(none)", 63 g_part_null_methods, 64 sizeof(struct g_part_table), 65 }; 66 67 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 68 TAILQ_HEAD_INITIALIZER(g_part_schemes); 69 70 struct g_part_alias_list { 71 const char *lexeme; 72 enum g_part_alias alias; 73 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 74 { "apple-apfs", G_PART_ALIAS_APPLE_APFS }, 75 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 76 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE }, 77 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 78 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 79 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 80 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 81 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 82 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 83 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 84 { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE }, 85 { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL }, 86 { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED }, 87 { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT }, 88 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD }, 89 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER }, 90 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 }, 91 { "dragonfly-label32", G_PART_ALIAS_DFBSD }, 92 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 }, 93 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY }, 94 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP }, 95 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS }, 96 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM }, 97 { "ebr", G_PART_ALIAS_EBR }, 98 { "efi", G_PART_ALIAS_EFI }, 99 { "fat16", G_PART_ALIAS_MS_FAT16 }, 100 { "fat32", G_PART_ALIAS_MS_FAT32 }, 101 { "fat32lba", G_PART_ALIAS_MS_FAT32LBA }, 102 { "freebsd", G_PART_ALIAS_FREEBSD }, 103 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 104 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS }, 105 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 106 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 107 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 108 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 109 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 110 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 111 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 112 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 113 { "mbr", G_PART_ALIAS_MBR }, 114 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 115 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 116 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 117 { "ms-recovery", G_PART_ALIAS_MS_RECOVERY }, 118 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 119 { "ms-spaces", G_PART_ALIAS_MS_SPACES }, 120 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 121 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 122 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 123 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 124 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 125 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 126 { "ntfs", G_PART_ALIAS_MS_NTFS }, 127 { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA }, 128 { "prep-boot", G_PART_ALIAS_PREP_BOOT }, 129 { "vmware-reserved", G_PART_ALIAS_VMRESERVED }, 130 { "vmware-vmfs", G_PART_ALIAS_VMFS }, 131 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG }, 132 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR }, 133 }; 134 135 SYSCTL_DECL(_kern_geom); 136 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 137 "GEOM_PART stuff"); 138 static u_int check_integrity = 1; 139 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 140 CTLFLAG_RWTUN, &check_integrity, 1, 141 "Enable integrity checking"); 142 static u_int auto_resize = 1; 143 SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize, 144 CTLFLAG_RWTUN, &auto_resize, 1, 145 "Enable auto resize"); 146 static u_int allow_nesting = 0; 147 SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting, 148 CTLFLAG_RWTUN, &allow_nesting, 0, 149 "Allow additional levels of nesting"); 150 char g_part_separator[MAXPATHLEN] = ""; 151 SYSCTL_STRING(_kern_geom_part, OID_AUTO, separator, 152 CTLFLAG_RDTUN, &g_part_separator, sizeof(g_part_separator), 153 "Partition name separator"); 154 155 /* 156 * The GEOM partitioning class. 157 */ 158 static g_ctl_req_t g_part_ctlreq; 159 static g_ctl_destroy_geom_t g_part_destroy_geom; 160 static g_fini_t g_part_fini; 161 static g_init_t g_part_init; 162 static g_taste_t g_part_taste; 163 164 static g_access_t g_part_access; 165 static g_dumpconf_t g_part_dumpconf; 166 static g_orphan_t g_part_orphan; 167 static g_spoiled_t g_part_spoiled; 168 static g_start_t g_part_start; 169 static g_resize_t g_part_resize; 170 static g_ioctl_t g_part_ioctl; 171 172 static struct g_class g_part_class = { 173 .name = "PART", 174 .version = G_VERSION, 175 /* Class methods. */ 176 .ctlreq = g_part_ctlreq, 177 .destroy_geom = g_part_destroy_geom, 178 .fini = g_part_fini, 179 .init = g_part_init, 180 .taste = g_part_taste, 181 /* Geom methods. */ 182 .access = g_part_access, 183 .dumpconf = g_part_dumpconf, 184 .orphan = g_part_orphan, 185 .spoiled = g_part_spoiled, 186 .start = g_part_start, 187 .resize = g_part_resize, 188 .ioctl = g_part_ioctl, 189 }; 190 191 DECLARE_GEOM_CLASS(g_part_class, g_part); 192 MODULE_VERSION(g_part, 0); 193 194 /* 195 * Support functions. 196 */ 197 198 static void g_part_wither(struct g_geom *, int); 199 200 const char * 201 g_part_alias_name(enum g_part_alias alias) 202 { 203 int i; 204 205 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 206 if (g_part_alias_list[i].alias != alias) 207 continue; 208 return (g_part_alias_list[i].lexeme); 209 } 210 211 return (NULL); 212 } 213 214 void 215 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 216 u_int *bestheads) 217 { 218 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 219 off_t chs, cylinders; 220 u_int heads; 221 int idx; 222 223 *bestchs = 0; 224 *bestheads = 0; 225 for (idx = 0; candidate_heads[idx] != 0; idx++) { 226 heads = candidate_heads[idx]; 227 cylinders = blocks / heads / sectors; 228 if (cylinders < heads || cylinders < sectors) 229 break; 230 if (cylinders > 1023) 231 continue; 232 chs = cylinders * heads * sectors; 233 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 234 *bestchs = chs; 235 *bestheads = heads; 236 } 237 } 238 } 239 240 static void 241 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 242 off_t blocks) 243 { 244 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 245 off_t chs, bestchs; 246 u_int heads, sectors; 247 int idx; 248 249 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 250 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 251 table->gpt_fixgeom = 0; 252 table->gpt_heads = 0; 253 table->gpt_sectors = 0; 254 bestchs = 0; 255 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 256 sectors = candidate_sectors[idx]; 257 g_part_geometry_heads(blocks, sectors, &chs, &heads); 258 if (chs == 0) 259 continue; 260 /* 261 * Prefer a geometry with sectors > 1, but only if 262 * it doesn't bump down the number of heads to 1. 263 */ 264 if (chs > bestchs || (chs == bestchs && heads > 1 && 265 table->gpt_sectors == 1)) { 266 bestchs = chs; 267 table->gpt_heads = heads; 268 table->gpt_sectors = sectors; 269 } 270 } 271 /* 272 * If we didn't find a geometry at all, then the disk is 273 * too big. This means we can use the maximum number of 274 * heads and sectors. 275 */ 276 if (bestchs == 0) { 277 table->gpt_heads = 255; 278 table->gpt_sectors = 63; 279 } 280 } else { 281 table->gpt_fixgeom = 1; 282 table->gpt_heads = heads; 283 table->gpt_sectors = sectors; 284 } 285 } 286 287 static void 288 g_part_get_physpath_done(struct bio *bp) 289 { 290 struct g_geom *gp; 291 struct g_part_entry *entry; 292 struct g_part_table *table; 293 struct g_provider *pp; 294 struct bio *pbp; 295 296 pbp = bp->bio_parent; 297 pp = pbp->bio_to; 298 gp = pp->geom; 299 table = gp->softc; 300 entry = pp->private; 301 302 if (bp->bio_error == 0) { 303 char *end; 304 size_t len, remainder; 305 len = strlcat(bp->bio_data, "/", bp->bio_length); 306 if (len < bp->bio_length) { 307 end = bp->bio_data + len; 308 remainder = bp->bio_length - len; 309 G_PART_NAME(table, entry, end, remainder); 310 } 311 } 312 g_std_done(bp); 313 } 314 315 316 #define DPRINTF(...) if (bootverbose) { \ 317 printf("GEOM_PART: " __VA_ARGS__); \ 318 } 319 320 static int 321 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 322 { 323 struct g_part_entry *e1, *e2; 324 struct g_provider *pp; 325 off_t offset; 326 int failed; 327 328 failed = 0; 329 pp = cp->provider; 330 if (table->gpt_last < table->gpt_first) { 331 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 332 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 333 failed++; 334 } 335 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 336 DPRINTF("last LBA extends beyond mediasize: " 337 "%jd > %jd\n", (intmax_t)table->gpt_last, 338 (intmax_t)pp->mediasize / pp->sectorsize - 1); 339 failed++; 340 } 341 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 342 if (e1->gpe_deleted || e1->gpe_internal) 343 continue; 344 if (e1->gpe_start < table->gpt_first) { 345 DPRINTF("partition %d has start offset below first " 346 "LBA: %jd < %jd\n", e1->gpe_index, 347 (intmax_t)e1->gpe_start, 348 (intmax_t)table->gpt_first); 349 failed++; 350 } 351 if (e1->gpe_start > table->gpt_last) { 352 DPRINTF("partition %d has start offset beyond last " 353 "LBA: %jd > %jd\n", e1->gpe_index, 354 (intmax_t)e1->gpe_start, 355 (intmax_t)table->gpt_last); 356 failed++; 357 } 358 if (e1->gpe_end < e1->gpe_start) { 359 DPRINTF("partition %d has end offset below start " 360 "offset: %jd < %jd\n", e1->gpe_index, 361 (intmax_t)e1->gpe_end, 362 (intmax_t)e1->gpe_start); 363 failed++; 364 } 365 if (e1->gpe_end > table->gpt_last) { 366 DPRINTF("partition %d has end offset beyond last " 367 "LBA: %jd > %jd\n", e1->gpe_index, 368 (intmax_t)e1->gpe_end, 369 (intmax_t)table->gpt_last); 370 failed++; 371 } 372 if (pp->stripesize > 0) { 373 offset = e1->gpe_start * pp->sectorsize; 374 if (e1->gpe_offset > offset) 375 offset = e1->gpe_offset; 376 if ((offset + pp->stripeoffset) % pp->stripesize) { 377 DPRINTF("partition %d on (%s, %s) is not " 378 "aligned on %ju bytes\n", e1->gpe_index, 379 pp->name, table->gpt_scheme->name, 380 (uintmax_t)pp->stripesize); 381 /* Don't treat this as a critical failure */ 382 } 383 } 384 e2 = e1; 385 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 386 if (e2->gpe_deleted || e2->gpe_internal) 387 continue; 388 if (e1->gpe_start >= e2->gpe_start && 389 e1->gpe_start <= e2->gpe_end) { 390 DPRINTF("partition %d has start offset inside " 391 "partition %d: start[%d] %jd >= start[%d] " 392 "%jd <= end[%d] %jd\n", 393 e1->gpe_index, e2->gpe_index, 394 e2->gpe_index, (intmax_t)e2->gpe_start, 395 e1->gpe_index, (intmax_t)e1->gpe_start, 396 e2->gpe_index, (intmax_t)e2->gpe_end); 397 failed++; 398 } 399 if (e1->gpe_end >= e2->gpe_start && 400 e1->gpe_end <= e2->gpe_end) { 401 DPRINTF("partition %d has end offset inside " 402 "partition %d: start[%d] %jd >= end[%d] " 403 "%jd <= end[%d] %jd\n", 404 e1->gpe_index, e2->gpe_index, 405 e2->gpe_index, (intmax_t)e2->gpe_start, 406 e1->gpe_index, (intmax_t)e1->gpe_end, 407 e2->gpe_index, (intmax_t)e2->gpe_end); 408 failed++; 409 } 410 if (e1->gpe_start < e2->gpe_start && 411 e1->gpe_end > e2->gpe_end) { 412 DPRINTF("partition %d contains partition %d: " 413 "start[%d] %jd > start[%d] %jd, end[%d] " 414 "%jd < end[%d] %jd\n", 415 e1->gpe_index, e2->gpe_index, 416 e1->gpe_index, (intmax_t)e1->gpe_start, 417 e2->gpe_index, (intmax_t)e2->gpe_start, 418 e2->gpe_index, (intmax_t)e2->gpe_end, 419 e1->gpe_index, (intmax_t)e1->gpe_end); 420 failed++; 421 } 422 } 423 } 424 if (failed != 0) { 425 printf("GEOM_PART: integrity check failed (%s, %s)\n", 426 pp->name, table->gpt_scheme->name); 427 if (check_integrity != 0) 428 return (EINVAL); 429 table->gpt_corrupt = 1; 430 } 431 return (0); 432 } 433 #undef DPRINTF 434 435 struct g_part_entry * 436 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 437 quad_t end) 438 { 439 struct g_part_entry *entry, *last; 440 441 last = NULL; 442 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 443 if (entry->gpe_index == index) 444 break; 445 if (entry->gpe_index > index) { 446 entry = NULL; 447 break; 448 } 449 last = entry; 450 } 451 if (entry == NULL) { 452 entry = g_malloc(table->gpt_scheme->gps_entrysz, 453 M_WAITOK | M_ZERO); 454 entry->gpe_index = index; 455 if (last == NULL) 456 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 457 else 458 LIST_INSERT_AFTER(last, entry, gpe_entry); 459 } else 460 entry->gpe_offset = 0; 461 entry->gpe_start = start; 462 entry->gpe_end = end; 463 return (entry); 464 } 465 466 static void 467 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 468 struct g_part_entry *entry) 469 { 470 struct g_consumer *cp; 471 struct g_provider *pp; 472 struct g_geom_alias *gap; 473 off_t offset; 474 475 cp = LIST_FIRST(&gp->consumer); 476 pp = cp->provider; 477 478 offset = entry->gpe_start * pp->sectorsize; 479 if (entry->gpe_offset < offset) 480 entry->gpe_offset = offset; 481 482 if (entry->gpe_pp == NULL) { 483 entry->gpe_pp = G_PART_NEW_PROVIDER(table, gp, entry, gp->name); 484 /* 485 * If our parent provider had any aliases, then copy them to our 486 * provider so when geom DEV tastes things later, they will be 487 * there for it to create the aliases with those name used in 488 * place of the geom's name we use to create the provider. The 489 * kobj interface that generates names makes this awkward. 490 */ 491 LIST_FOREACH(gap, &pp->aliases, ga_next) 492 G_PART_ADD_ALIAS(table, entry->gpe_pp, entry, gap->ga_alias); 493 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 494 entry->gpe_pp->private = entry; /* Close the circle. */ 495 } 496 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 497 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 498 pp->sectorsize; 499 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 500 entry->gpe_pp->sectorsize = pp->sectorsize; 501 entry->gpe_pp->stripesize = pp->stripesize; 502 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 503 if (pp->stripesize > 0) 504 entry->gpe_pp->stripeoffset %= pp->stripesize; 505 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED; 506 g_error_provider(entry->gpe_pp, 0); 507 } 508 509 static struct g_geom* 510 g_part_find_geom(const char *name) 511 { 512 struct g_geom *gp; 513 LIST_FOREACH(gp, &g_part_class.geom, geom) { 514 if ((gp->flags & G_GEOM_WITHER) == 0 && 515 strcmp(name, gp->name) == 0) 516 break; 517 } 518 return (gp); 519 } 520 521 static int 522 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 523 { 524 struct g_geom *gp; 525 const char *gname; 526 527 gname = gctl_get_asciiparam(req, name); 528 if (gname == NULL) 529 return (ENOATTR); 530 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 531 gname += sizeof(_PATH_DEV) - 1; 532 gp = g_part_find_geom(gname); 533 if (gp == NULL) { 534 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 535 return (EINVAL); 536 } 537 *v = gp; 538 return (0); 539 } 540 541 static int 542 g_part_parm_provider(struct gctl_req *req, const char *name, 543 struct g_provider **v) 544 { 545 struct g_provider *pp; 546 const char *pname; 547 548 pname = gctl_get_asciiparam(req, name); 549 if (pname == NULL) 550 return (ENOATTR); 551 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 552 pname += sizeof(_PATH_DEV) - 1; 553 pp = g_provider_by_name(pname); 554 if (pp == NULL) { 555 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 556 return (EINVAL); 557 } 558 *v = pp; 559 return (0); 560 } 561 562 static int 563 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 564 { 565 const char *p; 566 char *x; 567 quad_t q; 568 569 p = gctl_get_asciiparam(req, name); 570 if (p == NULL) 571 return (ENOATTR); 572 q = strtoq(p, &x, 0); 573 if (*x != '\0' || q < 0) { 574 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 575 return (EINVAL); 576 } 577 *v = q; 578 return (0); 579 } 580 581 static int 582 g_part_parm_scheme(struct gctl_req *req, const char *name, 583 struct g_part_scheme **v) 584 { 585 struct g_part_scheme *s; 586 const char *p; 587 588 p = gctl_get_asciiparam(req, name); 589 if (p == NULL) 590 return (ENOATTR); 591 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 592 if (s == &g_part_null_scheme) 593 continue; 594 if (!strcasecmp(s->name, p)) 595 break; 596 } 597 if (s == NULL) { 598 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 599 return (EINVAL); 600 } 601 *v = s; 602 return (0); 603 } 604 605 static int 606 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 607 { 608 const char *p; 609 610 p = gctl_get_asciiparam(req, name); 611 if (p == NULL) 612 return (ENOATTR); 613 /* An empty label is always valid. */ 614 if (strcmp(name, "label") != 0 && p[0] == '\0') { 615 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 616 return (EINVAL); 617 } 618 *v = p; 619 return (0); 620 } 621 622 static int 623 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 624 { 625 const intmax_t *p; 626 int size; 627 628 p = gctl_get_param(req, name, &size); 629 if (p == NULL) 630 return (ENOATTR); 631 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 632 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 633 return (EINVAL); 634 } 635 *v = (u_int)*p; 636 return (0); 637 } 638 639 static int 640 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 641 { 642 const uint32_t *p; 643 int size; 644 645 p = gctl_get_param(req, name, &size); 646 if (p == NULL) 647 return (ENOATTR); 648 if (size != sizeof(*p) || *p > INT_MAX) { 649 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 650 return (EINVAL); 651 } 652 *v = (u_int)*p; 653 return (0); 654 } 655 656 static int 657 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 658 unsigned int *s) 659 { 660 const void *p; 661 int size; 662 663 p = gctl_get_param(req, name, &size); 664 if (p == NULL) 665 return (ENOATTR); 666 *v = p; 667 *s = size; 668 return (0); 669 } 670 671 static int 672 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 673 { 674 struct g_part_scheme *iter, *scheme; 675 struct g_part_table *table; 676 int pri, probe; 677 678 table = gp->softc; 679 scheme = (table != NULL) ? table->gpt_scheme : NULL; 680 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 681 if (pri == 0) 682 goto done; 683 if (pri > 0) { /* error */ 684 scheme = NULL; 685 pri = INT_MIN; 686 } 687 688 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 689 if (iter == &g_part_null_scheme) 690 continue; 691 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 692 M_WAITOK); 693 table->gpt_gp = gp; 694 table->gpt_scheme = iter; 695 table->gpt_depth = depth; 696 probe = G_PART_PROBE(table, cp); 697 if (probe <= 0 && probe > pri) { 698 pri = probe; 699 scheme = iter; 700 if (gp->softc != NULL) 701 kobj_delete((kobj_t)gp->softc, M_GEOM); 702 gp->softc = table; 703 if (pri == 0) 704 goto done; 705 } else 706 kobj_delete((kobj_t)table, M_GEOM); 707 } 708 709 done: 710 return ((scheme == NULL) ? ENXIO : 0); 711 } 712 713 /* 714 * Control request functions. 715 */ 716 717 static int 718 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 719 { 720 struct g_geom *gp; 721 struct g_provider *pp; 722 struct g_part_entry *delent, *last, *entry; 723 struct g_part_table *table; 724 struct sbuf *sb; 725 quad_t end; 726 unsigned int index; 727 int error; 728 729 gp = gpp->gpp_geom; 730 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 731 g_topology_assert(); 732 733 pp = LIST_FIRST(&gp->consumer)->provider; 734 table = gp->softc; 735 end = gpp->gpp_start + gpp->gpp_size - 1; 736 737 if (gpp->gpp_start < table->gpt_first || 738 gpp->gpp_start > table->gpt_last) { 739 gctl_error(req, "%d start '%jd'", EINVAL, 740 (intmax_t)gpp->gpp_start); 741 return (EINVAL); 742 } 743 if (end < gpp->gpp_start || end > table->gpt_last) { 744 gctl_error(req, "%d size '%jd'", EINVAL, 745 (intmax_t)gpp->gpp_size); 746 return (EINVAL); 747 } 748 if (gpp->gpp_index > table->gpt_entries) { 749 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 750 return (EINVAL); 751 } 752 753 delent = last = NULL; 754 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 755 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 756 if (entry->gpe_deleted) { 757 if (entry->gpe_index == index) 758 delent = entry; 759 continue; 760 } 761 if (entry->gpe_index == index) 762 index = entry->gpe_index + 1; 763 if (entry->gpe_index < index) 764 last = entry; 765 if (entry->gpe_internal) 766 continue; 767 if (gpp->gpp_start >= entry->gpe_start && 768 gpp->gpp_start <= entry->gpe_end) { 769 gctl_error(req, "%d start '%jd'", ENOSPC, 770 (intmax_t)gpp->gpp_start); 771 return (ENOSPC); 772 } 773 if (end >= entry->gpe_start && end <= entry->gpe_end) { 774 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 775 return (ENOSPC); 776 } 777 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 778 gctl_error(req, "%d size '%jd'", ENOSPC, 779 (intmax_t)gpp->gpp_size); 780 return (ENOSPC); 781 } 782 } 783 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 784 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 785 return (EEXIST); 786 } 787 if (index > table->gpt_entries) { 788 gctl_error(req, "%d index '%d'", ENOSPC, index); 789 return (ENOSPC); 790 } 791 792 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 793 M_WAITOK | M_ZERO) : delent; 794 entry->gpe_index = index; 795 entry->gpe_start = gpp->gpp_start; 796 entry->gpe_end = end; 797 error = G_PART_ADD(table, entry, gpp); 798 if (error) { 799 gctl_error(req, "%d", error); 800 if (delent == NULL) 801 g_free(entry); 802 return (error); 803 } 804 if (delent == NULL) { 805 if (last == NULL) 806 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 807 else 808 LIST_INSERT_AFTER(last, entry, gpe_entry); 809 entry->gpe_created = 1; 810 } else { 811 entry->gpe_deleted = 0; 812 entry->gpe_modified = 1; 813 } 814 g_part_new_provider(gp, table, entry); 815 816 /* Provide feedback if so requested. */ 817 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 818 sb = sbuf_new_auto(); 819 G_PART_FULLNAME(table, entry, sb, gp->name); 820 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 821 sbuf_printf(sb, " added, but partition is not " 822 "aligned on %ju bytes\n", (uintmax_t)pp->stripesize); 823 else 824 sbuf_cat(sb, " added\n"); 825 sbuf_finish(sb); 826 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 827 sbuf_delete(sb); 828 } 829 return (0); 830 } 831 832 static int 833 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 834 { 835 struct g_geom *gp; 836 struct g_part_table *table; 837 struct sbuf *sb; 838 int error, sz; 839 840 gp = gpp->gpp_geom; 841 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 842 g_topology_assert(); 843 844 table = gp->softc; 845 sz = table->gpt_scheme->gps_bootcodesz; 846 if (sz == 0) { 847 error = ENODEV; 848 goto fail; 849 } 850 if (gpp->gpp_codesize > sz) { 851 error = EFBIG; 852 goto fail; 853 } 854 855 error = G_PART_BOOTCODE(table, gpp); 856 if (error) 857 goto fail; 858 859 /* Provide feedback if so requested. */ 860 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 861 sb = sbuf_new_auto(); 862 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 863 sbuf_finish(sb); 864 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 865 sbuf_delete(sb); 866 } 867 return (0); 868 869 fail: 870 gctl_error(req, "%d", error); 871 return (error); 872 } 873 874 static int 875 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 876 { 877 struct g_consumer *cp; 878 struct g_geom *gp; 879 struct g_provider *pp; 880 struct g_part_entry *entry, *tmp; 881 struct g_part_table *table; 882 char *buf; 883 int error, i; 884 885 gp = gpp->gpp_geom; 886 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 887 g_topology_assert(); 888 889 table = gp->softc; 890 if (!table->gpt_opened) { 891 gctl_error(req, "%d", EPERM); 892 return (EPERM); 893 } 894 895 g_topology_unlock(); 896 897 cp = LIST_FIRST(&gp->consumer); 898 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 899 pp = cp->provider; 900 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 901 while (table->gpt_smhead != 0) { 902 i = ffs(table->gpt_smhead) - 1; 903 error = g_write_data(cp, i * pp->sectorsize, buf, 904 pp->sectorsize); 905 if (error) { 906 g_free(buf); 907 goto fail; 908 } 909 table->gpt_smhead &= ~(1 << i); 910 } 911 while (table->gpt_smtail != 0) { 912 i = ffs(table->gpt_smtail) - 1; 913 error = g_write_data(cp, pp->mediasize - (i + 1) * 914 pp->sectorsize, buf, pp->sectorsize); 915 if (error) { 916 g_free(buf); 917 goto fail; 918 } 919 table->gpt_smtail &= ~(1 << i); 920 } 921 g_free(buf); 922 } 923 924 if (table->gpt_scheme == &g_part_null_scheme) { 925 g_topology_lock(); 926 g_access(cp, -1, -1, -1); 927 g_part_wither(gp, ENXIO); 928 return (0); 929 } 930 931 error = G_PART_WRITE(table, cp); 932 if (error) 933 goto fail; 934 935 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 936 if (!entry->gpe_deleted) { 937 /* Notify consumers that provider might be changed. */ 938 if (entry->gpe_modified && ( 939 entry->gpe_pp->acw + entry->gpe_pp->ace + 940 entry->gpe_pp->acr) == 0) 941 g_media_changed(entry->gpe_pp, M_NOWAIT); 942 entry->gpe_created = 0; 943 entry->gpe_modified = 0; 944 continue; 945 } 946 LIST_REMOVE(entry, gpe_entry); 947 g_free(entry); 948 } 949 table->gpt_created = 0; 950 table->gpt_opened = 0; 951 952 g_topology_lock(); 953 g_access(cp, -1, -1, -1); 954 return (0); 955 956 fail: 957 g_topology_lock(); 958 gctl_error(req, "%d", error); 959 return (error); 960 } 961 962 static int 963 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 964 { 965 struct g_consumer *cp; 966 struct g_geom *gp; 967 struct g_provider *pp; 968 struct g_part_scheme *scheme; 969 struct g_part_table *null, *table; 970 struct sbuf *sb; 971 int attr, error; 972 973 pp = gpp->gpp_provider; 974 scheme = gpp->gpp_scheme; 975 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 976 g_topology_assert(); 977 978 /* Check that there isn't already a g_part geom on the provider. */ 979 gp = g_part_find_geom(pp->name); 980 if (gp != NULL) { 981 null = gp->softc; 982 if (null->gpt_scheme != &g_part_null_scheme) { 983 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 984 return (EEXIST); 985 } 986 } else 987 null = NULL; 988 989 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 990 (gpp->gpp_entries < scheme->gps_minent || 991 gpp->gpp_entries > scheme->gps_maxent)) { 992 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 993 return (EINVAL); 994 } 995 996 if (null == NULL) 997 gp = g_new_geomf(&g_part_class, "%s", pp->name); 998 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 999 M_WAITOK); 1000 table = gp->softc; 1001 table->gpt_gp = gp; 1002 table->gpt_scheme = gpp->gpp_scheme; 1003 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 1004 gpp->gpp_entries : scheme->gps_minent; 1005 LIST_INIT(&table->gpt_entry); 1006 if (null == NULL) { 1007 cp = g_new_consumer(gp); 1008 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1009 error = g_attach(cp, pp); 1010 if (error == 0) 1011 error = g_access(cp, 1, 1, 1); 1012 if (error != 0) { 1013 g_part_wither(gp, error); 1014 gctl_error(req, "%d geom '%s'", error, pp->name); 1015 return (error); 1016 } 1017 table->gpt_opened = 1; 1018 } else { 1019 cp = LIST_FIRST(&gp->consumer); 1020 table->gpt_opened = null->gpt_opened; 1021 table->gpt_smhead = null->gpt_smhead; 1022 table->gpt_smtail = null->gpt_smtail; 1023 } 1024 1025 g_topology_unlock(); 1026 1027 /* Make sure the provider has media. */ 1028 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1029 error = ENODEV; 1030 goto fail; 1031 } 1032 1033 /* Make sure we can nest and if so, determine our depth. */ 1034 error = g_getattr("PART::isleaf", cp, &attr); 1035 if (!error && attr) { 1036 error = ENODEV; 1037 goto fail; 1038 } 1039 error = g_getattr("PART::depth", cp, &attr); 1040 table->gpt_depth = (!error) ? attr + 1 : 0; 1041 1042 /* 1043 * Synthesize a disk geometry. Some partitioning schemes 1044 * depend on it and since some file systems need it even 1045 * when the partitition scheme doesn't, we do it here in 1046 * scheme-independent code. 1047 */ 1048 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1049 1050 error = G_PART_CREATE(table, gpp); 1051 if (error) 1052 goto fail; 1053 1054 g_topology_lock(); 1055 1056 table->gpt_created = 1; 1057 if (null != NULL) 1058 kobj_delete((kobj_t)null, M_GEOM); 1059 1060 /* 1061 * Support automatic commit by filling in the gpp_geom 1062 * parameter. 1063 */ 1064 gpp->gpp_parms |= G_PART_PARM_GEOM; 1065 gpp->gpp_geom = gp; 1066 1067 /* Provide feedback if so requested. */ 1068 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1069 sb = sbuf_new_auto(); 1070 sbuf_printf(sb, "%s created\n", gp->name); 1071 sbuf_finish(sb); 1072 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1073 sbuf_delete(sb); 1074 } 1075 return (0); 1076 1077 fail: 1078 g_topology_lock(); 1079 if (null == NULL) { 1080 g_access(cp, -1, -1, -1); 1081 g_part_wither(gp, error); 1082 } else { 1083 kobj_delete((kobj_t)gp->softc, M_GEOM); 1084 gp->softc = null; 1085 } 1086 gctl_error(req, "%d provider", error); 1087 return (error); 1088 } 1089 1090 static int 1091 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1092 { 1093 struct g_geom *gp; 1094 struct g_provider *pp; 1095 struct g_part_entry *entry; 1096 struct g_part_table *table; 1097 struct sbuf *sb; 1098 1099 gp = gpp->gpp_geom; 1100 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1101 g_topology_assert(); 1102 1103 table = gp->softc; 1104 1105 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1106 if (entry->gpe_deleted || entry->gpe_internal) 1107 continue; 1108 if (entry->gpe_index == gpp->gpp_index) 1109 break; 1110 } 1111 if (entry == NULL) { 1112 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1113 return (ENOENT); 1114 } 1115 1116 pp = entry->gpe_pp; 1117 if (pp != NULL) { 1118 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1119 gctl_error(req, "%d", EBUSY); 1120 return (EBUSY); 1121 } 1122 1123 pp->private = NULL; 1124 entry->gpe_pp = NULL; 1125 } 1126 1127 if (pp != NULL) 1128 g_wither_provider(pp, ENXIO); 1129 1130 /* Provide feedback if so requested. */ 1131 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1132 sb = sbuf_new_auto(); 1133 G_PART_FULLNAME(table, entry, sb, gp->name); 1134 sbuf_cat(sb, " deleted\n"); 1135 sbuf_finish(sb); 1136 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1137 sbuf_delete(sb); 1138 } 1139 1140 if (entry->gpe_created) { 1141 LIST_REMOVE(entry, gpe_entry); 1142 g_free(entry); 1143 } else { 1144 entry->gpe_modified = 0; 1145 entry->gpe_deleted = 1; 1146 } 1147 return (0); 1148 } 1149 1150 static int 1151 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1152 { 1153 struct g_consumer *cp; 1154 struct g_geom *gp; 1155 struct g_provider *pp; 1156 struct g_part_entry *entry, *tmp; 1157 struct g_part_table *null, *table; 1158 struct sbuf *sb; 1159 int error; 1160 1161 gp = gpp->gpp_geom; 1162 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1163 g_topology_assert(); 1164 1165 table = gp->softc; 1166 /* Check for busy providers. */ 1167 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1168 if (entry->gpe_deleted || entry->gpe_internal) 1169 continue; 1170 if (gpp->gpp_force) { 1171 pp = entry->gpe_pp; 1172 if (pp == NULL) 1173 continue; 1174 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1175 continue; 1176 } 1177 gctl_error(req, "%d", EBUSY); 1178 return (EBUSY); 1179 } 1180 1181 if (gpp->gpp_force) { 1182 /* Destroy all providers. */ 1183 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1184 pp = entry->gpe_pp; 1185 if (pp != NULL) { 1186 pp->private = NULL; 1187 g_wither_provider(pp, ENXIO); 1188 } 1189 LIST_REMOVE(entry, gpe_entry); 1190 g_free(entry); 1191 } 1192 } 1193 1194 error = G_PART_DESTROY(table, gpp); 1195 if (error) { 1196 gctl_error(req, "%d", error); 1197 return (error); 1198 } 1199 1200 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1201 M_WAITOK); 1202 null = gp->softc; 1203 null->gpt_gp = gp; 1204 null->gpt_scheme = &g_part_null_scheme; 1205 LIST_INIT(&null->gpt_entry); 1206 1207 cp = LIST_FIRST(&gp->consumer); 1208 pp = cp->provider; 1209 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1210 1211 null->gpt_depth = table->gpt_depth; 1212 null->gpt_opened = table->gpt_opened; 1213 null->gpt_smhead = table->gpt_smhead; 1214 null->gpt_smtail = table->gpt_smtail; 1215 1216 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1217 LIST_REMOVE(entry, gpe_entry); 1218 g_free(entry); 1219 } 1220 kobj_delete((kobj_t)table, M_GEOM); 1221 1222 /* Provide feedback if so requested. */ 1223 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1224 sb = sbuf_new_auto(); 1225 sbuf_printf(sb, "%s destroyed\n", gp->name); 1226 sbuf_finish(sb); 1227 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1228 sbuf_delete(sb); 1229 } 1230 return (0); 1231 } 1232 1233 static int 1234 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1235 { 1236 struct g_geom *gp; 1237 struct g_part_entry *entry; 1238 struct g_part_table *table; 1239 struct sbuf *sb; 1240 int error; 1241 1242 gp = gpp->gpp_geom; 1243 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1244 g_topology_assert(); 1245 1246 table = gp->softc; 1247 1248 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1249 if (entry->gpe_deleted || entry->gpe_internal) 1250 continue; 1251 if (entry->gpe_index == gpp->gpp_index) 1252 break; 1253 } 1254 if (entry == NULL) { 1255 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1256 return (ENOENT); 1257 } 1258 1259 error = G_PART_MODIFY(table, entry, gpp); 1260 if (error) { 1261 gctl_error(req, "%d", error); 1262 return (error); 1263 } 1264 1265 if (!entry->gpe_created) 1266 entry->gpe_modified = 1; 1267 1268 /* Provide feedback if so requested. */ 1269 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1270 sb = sbuf_new_auto(); 1271 G_PART_FULLNAME(table, entry, sb, gp->name); 1272 sbuf_cat(sb, " modified\n"); 1273 sbuf_finish(sb); 1274 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1275 sbuf_delete(sb); 1276 } 1277 return (0); 1278 } 1279 1280 static int 1281 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1282 { 1283 gctl_error(req, "%d verb 'move'", ENOSYS); 1284 return (ENOSYS); 1285 } 1286 1287 static int 1288 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1289 { 1290 struct g_part_table *table; 1291 struct g_geom *gp; 1292 struct sbuf *sb; 1293 int error, recovered; 1294 1295 gp = gpp->gpp_geom; 1296 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1297 g_topology_assert(); 1298 table = gp->softc; 1299 error = recovered = 0; 1300 1301 if (table->gpt_corrupt) { 1302 error = G_PART_RECOVER(table); 1303 if (error == 0) 1304 error = g_part_check_integrity(table, 1305 LIST_FIRST(&gp->consumer)); 1306 if (error) { 1307 gctl_error(req, "%d recovering '%s' failed", 1308 error, gp->name); 1309 return (error); 1310 } 1311 recovered = 1; 1312 } 1313 /* Provide feedback if so requested. */ 1314 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1315 sb = sbuf_new_auto(); 1316 if (recovered) 1317 sbuf_printf(sb, "%s recovered\n", gp->name); 1318 else 1319 sbuf_printf(sb, "%s recovering is not needed\n", 1320 gp->name); 1321 sbuf_finish(sb); 1322 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1323 sbuf_delete(sb); 1324 } 1325 return (0); 1326 } 1327 1328 static int 1329 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1330 { 1331 struct g_geom *gp; 1332 struct g_provider *pp; 1333 struct g_part_entry *pe, *entry; 1334 struct g_part_table *table; 1335 struct sbuf *sb; 1336 quad_t end; 1337 int error; 1338 off_t mediasize; 1339 1340 gp = gpp->gpp_geom; 1341 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1342 g_topology_assert(); 1343 table = gp->softc; 1344 1345 /* check gpp_index */ 1346 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1347 if (entry->gpe_deleted || entry->gpe_internal) 1348 continue; 1349 if (entry->gpe_index == gpp->gpp_index) 1350 break; 1351 } 1352 if (entry == NULL) { 1353 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1354 return (ENOENT); 1355 } 1356 1357 /* check gpp_size */ 1358 end = entry->gpe_start + gpp->gpp_size - 1; 1359 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1360 gctl_error(req, "%d size '%jd'", EINVAL, 1361 (intmax_t)gpp->gpp_size); 1362 return (EINVAL); 1363 } 1364 1365 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1366 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1367 continue; 1368 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1369 gctl_error(req, "%d end '%jd'", ENOSPC, 1370 (intmax_t)end); 1371 return (ENOSPC); 1372 } 1373 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1374 gctl_error(req, "%d size '%jd'", ENOSPC, 1375 (intmax_t)gpp->gpp_size); 1376 return (ENOSPC); 1377 } 1378 } 1379 1380 pp = entry->gpe_pp; 1381 if ((g_debugflags & G_F_FOOTSHOOTING) == 0 && 1382 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1383 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) { 1384 /* Deny shrinking of an opened partition. */ 1385 gctl_error(req, "%d", EBUSY); 1386 return (EBUSY); 1387 } 1388 } 1389 1390 error = G_PART_RESIZE(table, entry, gpp); 1391 if (error) { 1392 gctl_error(req, "%d%s", error, error != EBUSY ? "": 1393 " resizing will lead to unexpected shrinking" 1394 " due to alignment"); 1395 return (error); 1396 } 1397 1398 if (!entry->gpe_created) 1399 entry->gpe_modified = 1; 1400 1401 /* update mediasize of changed provider */ 1402 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1403 pp->sectorsize; 1404 g_resize_provider(pp, mediasize); 1405 1406 /* Provide feedback if so requested. */ 1407 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1408 sb = sbuf_new_auto(); 1409 G_PART_FULLNAME(table, entry, sb, gp->name); 1410 sbuf_cat(sb, " resized\n"); 1411 sbuf_finish(sb); 1412 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1413 sbuf_delete(sb); 1414 } 1415 return (0); 1416 } 1417 1418 static int 1419 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1420 unsigned int set) 1421 { 1422 struct g_geom *gp; 1423 struct g_part_entry *entry; 1424 struct g_part_table *table; 1425 struct sbuf *sb; 1426 int error; 1427 1428 gp = gpp->gpp_geom; 1429 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1430 g_topology_assert(); 1431 1432 table = gp->softc; 1433 1434 if (gpp->gpp_parms & G_PART_PARM_INDEX) { 1435 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1436 if (entry->gpe_deleted || entry->gpe_internal) 1437 continue; 1438 if (entry->gpe_index == gpp->gpp_index) 1439 break; 1440 } 1441 if (entry == NULL) { 1442 gctl_error(req, "%d index '%d'", ENOENT, 1443 gpp->gpp_index); 1444 return (ENOENT); 1445 } 1446 } else 1447 entry = NULL; 1448 1449 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1450 if (error) { 1451 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1452 return (error); 1453 } 1454 1455 /* Provide feedback if so requested. */ 1456 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1457 sb = sbuf_new_auto(); 1458 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1459 (set) ? "" : "un"); 1460 if (entry) 1461 G_PART_FULLNAME(table, entry, sb, gp->name); 1462 else 1463 sbuf_cat(sb, gp->name); 1464 sbuf_cat(sb, "\n"); 1465 sbuf_finish(sb); 1466 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1467 sbuf_delete(sb); 1468 } 1469 return (0); 1470 } 1471 1472 static int 1473 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1474 { 1475 struct g_consumer *cp; 1476 struct g_provider *pp; 1477 struct g_geom *gp; 1478 struct g_part_entry *entry, *tmp; 1479 struct g_part_table *table; 1480 int error, reprobe; 1481 1482 gp = gpp->gpp_geom; 1483 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1484 g_topology_assert(); 1485 1486 table = gp->softc; 1487 if (!table->gpt_opened) { 1488 gctl_error(req, "%d", EPERM); 1489 return (EPERM); 1490 } 1491 1492 cp = LIST_FIRST(&gp->consumer); 1493 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1494 entry->gpe_modified = 0; 1495 if (entry->gpe_created) { 1496 pp = entry->gpe_pp; 1497 if (pp != NULL) { 1498 pp->private = NULL; 1499 entry->gpe_pp = NULL; 1500 g_wither_provider(pp, ENXIO); 1501 } 1502 entry->gpe_deleted = 1; 1503 } 1504 if (entry->gpe_deleted) { 1505 LIST_REMOVE(entry, gpe_entry); 1506 g_free(entry); 1507 } 1508 } 1509 1510 g_topology_unlock(); 1511 1512 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1513 table->gpt_created) ? 1 : 0; 1514 1515 if (reprobe) { 1516 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1517 if (entry->gpe_internal) 1518 continue; 1519 error = EBUSY; 1520 goto fail; 1521 } 1522 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1523 LIST_REMOVE(entry, gpe_entry); 1524 g_free(entry); 1525 } 1526 error = g_part_probe(gp, cp, table->gpt_depth); 1527 if (error) { 1528 g_topology_lock(); 1529 g_access(cp, -1, -1, -1); 1530 g_part_wither(gp, error); 1531 return (0); 1532 } 1533 table = gp->softc; 1534 1535 /* 1536 * Synthesize a disk geometry. Some partitioning schemes 1537 * depend on it and since some file systems need it even 1538 * when the partitition scheme doesn't, we do it here in 1539 * scheme-independent code. 1540 */ 1541 pp = cp->provider; 1542 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1543 } 1544 1545 error = G_PART_READ(table, cp); 1546 if (error) 1547 goto fail; 1548 error = g_part_check_integrity(table, cp); 1549 if (error) 1550 goto fail; 1551 1552 g_topology_lock(); 1553 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1554 if (!entry->gpe_internal) 1555 g_part_new_provider(gp, table, entry); 1556 } 1557 1558 table->gpt_opened = 0; 1559 g_access(cp, -1, -1, -1); 1560 return (0); 1561 1562 fail: 1563 g_topology_lock(); 1564 gctl_error(req, "%d", error); 1565 return (error); 1566 } 1567 1568 static void 1569 g_part_wither(struct g_geom *gp, int error) 1570 { 1571 struct g_part_entry *entry; 1572 struct g_part_table *table; 1573 struct g_provider *pp; 1574 1575 table = gp->softc; 1576 if (table != NULL) { 1577 gp->softc = NULL; 1578 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1579 LIST_REMOVE(entry, gpe_entry); 1580 pp = entry->gpe_pp; 1581 entry->gpe_pp = NULL; 1582 if (pp != NULL) { 1583 pp->private = NULL; 1584 g_wither_provider(pp, error); 1585 } 1586 g_free(entry); 1587 } 1588 G_PART_DESTROY(table, NULL); 1589 kobj_delete((kobj_t)table, M_GEOM); 1590 } 1591 g_wither_geom(gp, error); 1592 } 1593 1594 /* 1595 * Class methods. 1596 */ 1597 1598 static void 1599 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1600 { 1601 struct g_part_parms gpp; 1602 struct g_part_table *table; 1603 struct gctl_req_arg *ap; 1604 enum g_part_ctl ctlreq; 1605 unsigned int i, mparms, oparms, parm; 1606 int auto_commit, close_on_error; 1607 int error, modifies; 1608 1609 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1610 g_topology_assert(); 1611 1612 ctlreq = G_PART_CTL_NONE; 1613 modifies = 1; 1614 mparms = 0; 1615 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1616 switch (*verb) { 1617 case 'a': 1618 if (!strcmp(verb, "add")) { 1619 ctlreq = G_PART_CTL_ADD; 1620 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1621 G_PART_PARM_START | G_PART_PARM_TYPE; 1622 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1623 } 1624 break; 1625 case 'b': 1626 if (!strcmp(verb, "bootcode")) { 1627 ctlreq = G_PART_CTL_BOOTCODE; 1628 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1629 oparms |= G_PART_PARM_SKIP_DSN; 1630 } 1631 break; 1632 case 'c': 1633 if (!strcmp(verb, "commit")) { 1634 ctlreq = G_PART_CTL_COMMIT; 1635 mparms |= G_PART_PARM_GEOM; 1636 modifies = 0; 1637 } else if (!strcmp(verb, "create")) { 1638 ctlreq = G_PART_CTL_CREATE; 1639 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1640 oparms |= G_PART_PARM_ENTRIES; 1641 } 1642 break; 1643 case 'd': 1644 if (!strcmp(verb, "delete")) { 1645 ctlreq = G_PART_CTL_DELETE; 1646 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1647 } else if (!strcmp(verb, "destroy")) { 1648 ctlreq = G_PART_CTL_DESTROY; 1649 mparms |= G_PART_PARM_GEOM; 1650 oparms |= G_PART_PARM_FORCE; 1651 } 1652 break; 1653 case 'm': 1654 if (!strcmp(verb, "modify")) { 1655 ctlreq = G_PART_CTL_MODIFY; 1656 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1657 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1658 } else if (!strcmp(verb, "move")) { 1659 ctlreq = G_PART_CTL_MOVE; 1660 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1661 } 1662 break; 1663 case 'r': 1664 if (!strcmp(verb, "recover")) { 1665 ctlreq = G_PART_CTL_RECOVER; 1666 mparms |= G_PART_PARM_GEOM; 1667 } else if (!strcmp(verb, "resize")) { 1668 ctlreq = G_PART_CTL_RESIZE; 1669 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1670 G_PART_PARM_SIZE; 1671 } 1672 break; 1673 case 's': 1674 if (!strcmp(verb, "set")) { 1675 ctlreq = G_PART_CTL_SET; 1676 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1677 oparms |= G_PART_PARM_INDEX; 1678 } 1679 break; 1680 case 'u': 1681 if (!strcmp(verb, "undo")) { 1682 ctlreq = G_PART_CTL_UNDO; 1683 mparms |= G_PART_PARM_GEOM; 1684 modifies = 0; 1685 } else if (!strcmp(verb, "unset")) { 1686 ctlreq = G_PART_CTL_UNSET; 1687 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1688 oparms |= G_PART_PARM_INDEX; 1689 } 1690 break; 1691 } 1692 if (ctlreq == G_PART_CTL_NONE) { 1693 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1694 return; 1695 } 1696 1697 bzero(&gpp, sizeof(gpp)); 1698 for (i = 0; i < req->narg; i++) { 1699 ap = &req->arg[i]; 1700 parm = 0; 1701 switch (ap->name[0]) { 1702 case 'a': 1703 if (!strcmp(ap->name, "arg0")) { 1704 parm = mparms & 1705 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1706 } 1707 if (!strcmp(ap->name, "attrib")) 1708 parm = G_PART_PARM_ATTRIB; 1709 break; 1710 case 'b': 1711 if (!strcmp(ap->name, "bootcode")) 1712 parm = G_PART_PARM_BOOTCODE; 1713 break; 1714 case 'c': 1715 if (!strcmp(ap->name, "class")) 1716 continue; 1717 break; 1718 case 'e': 1719 if (!strcmp(ap->name, "entries")) 1720 parm = G_PART_PARM_ENTRIES; 1721 break; 1722 case 'f': 1723 if (!strcmp(ap->name, "flags")) 1724 parm = G_PART_PARM_FLAGS; 1725 else if (!strcmp(ap->name, "force")) 1726 parm = G_PART_PARM_FORCE; 1727 break; 1728 case 'i': 1729 if (!strcmp(ap->name, "index")) 1730 parm = G_PART_PARM_INDEX; 1731 break; 1732 case 'l': 1733 if (!strcmp(ap->name, "label")) 1734 parm = G_PART_PARM_LABEL; 1735 break; 1736 case 'o': 1737 if (!strcmp(ap->name, "output")) 1738 parm = G_PART_PARM_OUTPUT; 1739 break; 1740 case 's': 1741 if (!strcmp(ap->name, "scheme")) 1742 parm = G_PART_PARM_SCHEME; 1743 else if (!strcmp(ap->name, "size")) 1744 parm = G_PART_PARM_SIZE; 1745 else if (!strcmp(ap->name, "start")) 1746 parm = G_PART_PARM_START; 1747 else if (!strcmp(ap->name, "skip_dsn")) 1748 parm = G_PART_PARM_SKIP_DSN; 1749 break; 1750 case 't': 1751 if (!strcmp(ap->name, "type")) 1752 parm = G_PART_PARM_TYPE; 1753 break; 1754 case 'v': 1755 if (!strcmp(ap->name, "verb")) 1756 continue; 1757 else if (!strcmp(ap->name, "version")) 1758 parm = G_PART_PARM_VERSION; 1759 break; 1760 } 1761 if ((parm & (mparms | oparms)) == 0) { 1762 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1763 return; 1764 } 1765 switch (parm) { 1766 case G_PART_PARM_ATTRIB: 1767 error = g_part_parm_str(req, ap->name, 1768 &gpp.gpp_attrib); 1769 break; 1770 case G_PART_PARM_BOOTCODE: 1771 error = g_part_parm_bootcode(req, ap->name, 1772 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1773 break; 1774 case G_PART_PARM_ENTRIES: 1775 error = g_part_parm_intmax(req, ap->name, 1776 &gpp.gpp_entries); 1777 break; 1778 case G_PART_PARM_FLAGS: 1779 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1780 break; 1781 case G_PART_PARM_FORCE: 1782 error = g_part_parm_uint32(req, ap->name, 1783 &gpp.gpp_force); 1784 break; 1785 case G_PART_PARM_GEOM: 1786 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1787 break; 1788 case G_PART_PARM_INDEX: 1789 error = g_part_parm_intmax(req, ap->name, 1790 &gpp.gpp_index); 1791 break; 1792 case G_PART_PARM_LABEL: 1793 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1794 break; 1795 case G_PART_PARM_OUTPUT: 1796 error = 0; /* Write-only parameter */ 1797 break; 1798 case G_PART_PARM_PROVIDER: 1799 error = g_part_parm_provider(req, ap->name, 1800 &gpp.gpp_provider); 1801 break; 1802 case G_PART_PARM_SCHEME: 1803 error = g_part_parm_scheme(req, ap->name, 1804 &gpp.gpp_scheme); 1805 break; 1806 case G_PART_PARM_SIZE: 1807 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1808 break; 1809 case G_PART_PARM_SKIP_DSN: 1810 error = g_part_parm_uint32(req, ap->name, 1811 &gpp.gpp_skip_dsn); 1812 break; 1813 case G_PART_PARM_START: 1814 error = g_part_parm_quad(req, ap->name, 1815 &gpp.gpp_start); 1816 break; 1817 case G_PART_PARM_TYPE: 1818 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1819 break; 1820 case G_PART_PARM_VERSION: 1821 error = g_part_parm_uint32(req, ap->name, 1822 &gpp.gpp_version); 1823 break; 1824 default: 1825 error = EDOOFUS; 1826 gctl_error(req, "%d %s", error, ap->name); 1827 break; 1828 } 1829 if (error != 0) { 1830 if (error == ENOATTR) { 1831 gctl_error(req, "%d param '%s'", error, 1832 ap->name); 1833 } 1834 return; 1835 } 1836 gpp.gpp_parms |= parm; 1837 } 1838 if ((gpp.gpp_parms & mparms) != mparms) { 1839 parm = mparms - (gpp.gpp_parms & mparms); 1840 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1841 return; 1842 } 1843 1844 /* Obtain permissions if possible/necessary. */ 1845 close_on_error = 0; 1846 table = NULL; 1847 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1848 table = gpp.gpp_geom->softc; 1849 if (table != NULL && table->gpt_corrupt && 1850 ctlreq != G_PART_CTL_DESTROY && 1851 ctlreq != G_PART_CTL_RECOVER) { 1852 gctl_error(req, "%d table '%s' is corrupt", 1853 EPERM, gpp.gpp_geom->name); 1854 return; 1855 } 1856 if (table != NULL && !table->gpt_opened) { 1857 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1858 1, 1, 1); 1859 if (error) { 1860 gctl_error(req, "%d geom '%s'", error, 1861 gpp.gpp_geom->name); 1862 return; 1863 } 1864 table->gpt_opened = 1; 1865 close_on_error = 1; 1866 } 1867 } 1868 1869 /* Allow the scheme to check or modify the parameters. */ 1870 if (table != NULL) { 1871 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1872 if (error) { 1873 gctl_error(req, "%d pre-check failed", error); 1874 goto out; 1875 } 1876 } else 1877 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1878 1879 switch (ctlreq) { 1880 case G_PART_CTL_NONE: 1881 panic("%s", __func__); 1882 case G_PART_CTL_ADD: 1883 error = g_part_ctl_add(req, &gpp); 1884 break; 1885 case G_PART_CTL_BOOTCODE: 1886 error = g_part_ctl_bootcode(req, &gpp); 1887 break; 1888 case G_PART_CTL_COMMIT: 1889 error = g_part_ctl_commit(req, &gpp); 1890 break; 1891 case G_PART_CTL_CREATE: 1892 error = g_part_ctl_create(req, &gpp); 1893 break; 1894 case G_PART_CTL_DELETE: 1895 error = g_part_ctl_delete(req, &gpp); 1896 break; 1897 case G_PART_CTL_DESTROY: 1898 error = g_part_ctl_destroy(req, &gpp); 1899 break; 1900 case G_PART_CTL_MODIFY: 1901 error = g_part_ctl_modify(req, &gpp); 1902 break; 1903 case G_PART_CTL_MOVE: 1904 error = g_part_ctl_move(req, &gpp); 1905 break; 1906 case G_PART_CTL_RECOVER: 1907 error = g_part_ctl_recover(req, &gpp); 1908 break; 1909 case G_PART_CTL_RESIZE: 1910 error = g_part_ctl_resize(req, &gpp); 1911 break; 1912 case G_PART_CTL_SET: 1913 error = g_part_ctl_setunset(req, &gpp, 1); 1914 break; 1915 case G_PART_CTL_UNDO: 1916 error = g_part_ctl_undo(req, &gpp); 1917 break; 1918 case G_PART_CTL_UNSET: 1919 error = g_part_ctl_setunset(req, &gpp, 0); 1920 break; 1921 } 1922 1923 /* Implement automatic commit. */ 1924 if (!error) { 1925 auto_commit = (modifies && 1926 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1927 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1928 if (auto_commit) { 1929 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1930 __func__)); 1931 error = g_part_ctl_commit(req, &gpp); 1932 } 1933 } 1934 1935 out: 1936 if (error && close_on_error) { 1937 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1938 table->gpt_opened = 0; 1939 } 1940 } 1941 1942 static int 1943 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1944 struct g_geom *gp) 1945 { 1946 1947 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1948 g_topology_assert(); 1949 1950 g_part_wither(gp, EINVAL); 1951 return (0); 1952 } 1953 1954 static struct g_geom * 1955 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1956 { 1957 struct g_consumer *cp; 1958 struct g_geom *gp; 1959 struct g_part_entry *entry; 1960 struct g_part_table *table; 1961 struct root_hold_token *rht; 1962 int attr, depth; 1963 int error; 1964 1965 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1966 g_topology_assert(); 1967 1968 /* Skip providers that are already open for writing. */ 1969 if (pp->acw > 0) 1970 return (NULL); 1971 1972 /* 1973 * Create a GEOM with consumer and hook it up to the provider. 1974 * With that we become part of the topology. Obtain read access 1975 * to the provider. 1976 */ 1977 gp = g_new_geomf(mp, "%s", pp->name); 1978 cp = g_new_consumer(gp); 1979 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1980 error = g_attach(cp, pp); 1981 if (error == 0) 1982 error = g_access(cp, 1, 0, 0); 1983 if (error != 0) { 1984 if (cp->provider) 1985 g_detach(cp); 1986 g_destroy_consumer(cp); 1987 g_destroy_geom(gp); 1988 return (NULL); 1989 } 1990 1991 rht = root_mount_hold(mp->name); 1992 g_topology_unlock(); 1993 1994 /* 1995 * Short-circuit the whole probing galore when there's no 1996 * media present. 1997 */ 1998 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1999 error = ENODEV; 2000 goto fail; 2001 } 2002 2003 /* Make sure we can nest and if so, determine our depth. */ 2004 error = g_getattr("PART::isleaf", cp, &attr); 2005 if (!error && attr) { 2006 error = ENODEV; 2007 goto fail; 2008 } 2009 error = g_getattr("PART::depth", cp, &attr); 2010 depth = (!error) ? attr + 1 : 0; 2011 2012 error = g_part_probe(gp, cp, depth); 2013 if (error) 2014 goto fail; 2015 2016 table = gp->softc; 2017 2018 /* 2019 * Synthesize a disk geometry. Some partitioning schemes 2020 * depend on it and since some file systems need it even 2021 * when the partitition scheme doesn't, we do it here in 2022 * scheme-independent code. 2023 */ 2024 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 2025 2026 error = G_PART_READ(table, cp); 2027 if (error) 2028 goto fail; 2029 error = g_part_check_integrity(table, cp); 2030 if (error) 2031 goto fail; 2032 2033 g_topology_lock(); 2034 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 2035 if (!entry->gpe_internal) 2036 g_part_new_provider(gp, table, entry); 2037 } 2038 2039 root_mount_rel(rht); 2040 g_access(cp, -1, 0, 0); 2041 return (gp); 2042 2043 fail: 2044 g_topology_lock(); 2045 root_mount_rel(rht); 2046 g_access(cp, -1, 0, 0); 2047 g_detach(cp); 2048 g_destroy_consumer(cp); 2049 g_destroy_geom(gp); 2050 return (NULL); 2051 } 2052 2053 /* 2054 * Geom methods. 2055 */ 2056 2057 static int 2058 g_part_access(struct g_provider *pp, int dr, int dw, int de) 2059 { 2060 struct g_consumer *cp; 2061 2062 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 2063 dw, de)); 2064 2065 cp = LIST_FIRST(&pp->geom->consumer); 2066 2067 /* We always gain write-exclusive access. */ 2068 return (g_access(cp, dr, dw, dw + de)); 2069 } 2070 2071 static void 2072 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 2073 struct g_consumer *cp, struct g_provider *pp) 2074 { 2075 char buf[64]; 2076 struct g_part_entry *entry; 2077 struct g_part_table *table; 2078 2079 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 2080 table = gp->softc; 2081 2082 if (indent == NULL) { 2083 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 2084 entry = pp->private; 2085 if (entry == NULL) 2086 return; 2087 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 2088 (uintmax_t)entry->gpe_offset, 2089 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2090 /* 2091 * libdisk compatibility quirk - the scheme dumps the 2092 * slicer name and partition type in a way that is 2093 * compatible with libdisk. When libdisk is not used 2094 * anymore, this should go away. 2095 */ 2096 G_PART_DUMPCONF(table, entry, sb, indent); 2097 } else if (cp != NULL) { /* Consumer configuration. */ 2098 KASSERT(pp == NULL, ("%s", __func__)); 2099 /* none */ 2100 } else if (pp != NULL) { /* Provider configuration. */ 2101 entry = pp->private; 2102 if (entry == NULL) 2103 return; 2104 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2105 (uintmax_t)entry->gpe_start); 2106 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2107 (uintmax_t)entry->gpe_end); 2108 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2109 entry->gpe_index); 2110 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2111 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2112 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2113 (uintmax_t)entry->gpe_offset); 2114 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2115 (uintmax_t)pp->mediasize); 2116 G_PART_DUMPCONF(table, entry, sb, indent); 2117 } else { /* Geom configuration. */ 2118 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2119 table->gpt_scheme->name); 2120 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2121 table->gpt_entries); 2122 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2123 (uintmax_t)table->gpt_first); 2124 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2125 (uintmax_t)table->gpt_last); 2126 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2127 table->gpt_sectors); 2128 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2129 table->gpt_heads); 2130 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2131 table->gpt_corrupt ? "CORRUPT": "OK"); 2132 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2133 table->gpt_opened ? "true": "false"); 2134 G_PART_DUMPCONF(table, NULL, sb, indent); 2135 } 2136 } 2137 2138 /*- 2139 * This start routine is only called for non-trivial requests, all the 2140 * trivial ones are handled autonomously by the slice code. 2141 * For requests we handle here, we must call the g_io_deliver() on the 2142 * bio, and return non-zero to indicate to the slice code that we did so. 2143 * This code executes in the "DOWN" I/O path, this means: 2144 * * No sleeping. 2145 * * Don't grab the topology lock. 2146 * * Don't call biowait, g_getattr(), g_setattr() or g_read_data() 2147 */ 2148 static int 2149 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td) 2150 { 2151 struct g_part_table *table; 2152 2153 table = pp->geom->softc; 2154 return G_PART_IOCTL(table, pp, cmd, data, fflag, td); 2155 } 2156 2157 static void 2158 g_part_resize(struct g_consumer *cp) 2159 { 2160 struct g_part_table *table; 2161 2162 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2163 g_topology_assert(); 2164 2165 if (auto_resize == 0) 2166 return; 2167 2168 table = cp->geom->softc; 2169 if (table->gpt_opened == 0) { 2170 if (g_access(cp, 1, 1, 1) != 0) 2171 return; 2172 table->gpt_opened = 1; 2173 } 2174 if (G_PART_RESIZE(table, NULL, NULL) == 0) 2175 printf("GEOM_PART: %s was automatically resized.\n" 2176 " Use `gpart commit %s` to save changes or " 2177 "`gpart undo %s` to revert them.\n", cp->geom->name, 2178 cp->geom->name, cp->geom->name); 2179 if (g_part_check_integrity(table, cp) != 0) { 2180 g_access(cp, -1, -1, -1); 2181 table->gpt_opened = 0; 2182 g_part_wither(table->gpt_gp, ENXIO); 2183 } 2184 } 2185 2186 static void 2187 g_part_orphan(struct g_consumer *cp) 2188 { 2189 struct g_provider *pp; 2190 struct g_part_table *table; 2191 2192 pp = cp->provider; 2193 KASSERT(pp != NULL, ("%s", __func__)); 2194 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2195 g_topology_assert(); 2196 2197 KASSERT(pp->error != 0, ("%s", __func__)); 2198 table = cp->geom->softc; 2199 if (table != NULL && table->gpt_opened) 2200 g_access(cp, -1, -1, -1); 2201 g_part_wither(cp->geom, pp->error); 2202 } 2203 2204 static void 2205 g_part_spoiled(struct g_consumer *cp) 2206 { 2207 2208 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2209 g_topology_assert(); 2210 2211 cp->flags |= G_CF_ORPHAN; 2212 g_part_wither(cp->geom, ENXIO); 2213 } 2214 2215 static void 2216 g_part_start(struct bio *bp) 2217 { 2218 struct bio *bp2; 2219 struct g_consumer *cp; 2220 struct g_geom *gp; 2221 struct g_part_entry *entry; 2222 struct g_part_table *table; 2223 struct g_kerneldump *gkd; 2224 struct g_provider *pp; 2225 void (*done_func)(struct bio *) = g_std_done; 2226 char buf[64]; 2227 2228 biotrack(bp, __func__); 2229 2230 pp = bp->bio_to; 2231 gp = pp->geom; 2232 table = gp->softc; 2233 cp = LIST_FIRST(&gp->consumer); 2234 2235 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2236 pp->name)); 2237 2238 entry = pp->private; 2239 if (entry == NULL) { 2240 g_io_deliver(bp, ENXIO); 2241 return; 2242 } 2243 2244 switch(bp->bio_cmd) { 2245 case BIO_DELETE: 2246 case BIO_READ: 2247 case BIO_WRITE: 2248 if (bp->bio_offset >= pp->mediasize) { 2249 g_io_deliver(bp, EIO); 2250 return; 2251 } 2252 bp2 = g_clone_bio(bp); 2253 if (bp2 == NULL) { 2254 g_io_deliver(bp, ENOMEM); 2255 return; 2256 } 2257 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2258 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2259 bp2->bio_done = g_std_done; 2260 bp2->bio_offset += entry->gpe_offset; 2261 g_io_request(bp2, cp); 2262 return; 2263 case BIO_SPEEDUP: 2264 case BIO_FLUSH: 2265 break; 2266 case BIO_GETATTR: 2267 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2268 return; 2269 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2270 return; 2271 /* 2272 * allow_nesting overrides "isleaf" to false _unless_ the 2273 * provider offset is zero, since otherwise we would recurse. 2274 */ 2275 if (g_handleattr_int(bp, "PART::isleaf", 2276 table->gpt_isleaf && 2277 (allow_nesting == 0 || entry->gpe_offset == 0))) 2278 return; 2279 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2280 return; 2281 if (g_handleattr_str(bp, "PART::scheme", 2282 table->gpt_scheme->name)) 2283 return; 2284 if (g_handleattr_str(bp, "PART::type", 2285 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2286 return; 2287 if (!strcmp("GEOM::physpath", bp->bio_attribute)) { 2288 done_func = g_part_get_physpath_done; 2289 break; 2290 } 2291 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2292 /* 2293 * Check that the partition is suitable for kernel 2294 * dumps. Typically only swap partitions should be 2295 * used. If the request comes from the nested scheme 2296 * we allow dumping there as well. 2297 */ 2298 if ((bp->bio_from == NULL || 2299 bp->bio_from->geom->class != &g_part_class) && 2300 G_PART_DUMPTO(table, entry) == 0) { 2301 g_io_deliver(bp, ENODEV); 2302 printf("GEOM_PART: Partition '%s' not suitable" 2303 " for kernel dumps (wrong type?)\n", 2304 pp->name); 2305 return; 2306 } 2307 gkd = (struct g_kerneldump *)bp->bio_data; 2308 if (gkd->offset >= pp->mediasize) { 2309 g_io_deliver(bp, EIO); 2310 return; 2311 } 2312 if (gkd->offset + gkd->length > pp->mediasize) 2313 gkd->length = pp->mediasize - gkd->offset; 2314 gkd->offset += entry->gpe_offset; 2315 } 2316 break; 2317 default: 2318 g_io_deliver(bp, EOPNOTSUPP); 2319 return; 2320 } 2321 2322 bp2 = g_clone_bio(bp); 2323 if (bp2 == NULL) { 2324 g_io_deliver(bp, ENOMEM); 2325 return; 2326 } 2327 bp2->bio_done = done_func; 2328 g_io_request(bp2, cp); 2329 } 2330 2331 static void 2332 g_part_init(struct g_class *mp) 2333 { 2334 2335 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2336 } 2337 2338 static void 2339 g_part_fini(struct g_class *mp) 2340 { 2341 2342 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2343 } 2344 2345 static void 2346 g_part_unload_event(void *arg, int flag) 2347 { 2348 struct g_consumer *cp; 2349 struct g_geom *gp; 2350 struct g_provider *pp; 2351 struct g_part_scheme *scheme; 2352 struct g_part_table *table; 2353 uintptr_t *xchg; 2354 int acc, error; 2355 2356 if (flag == EV_CANCEL) 2357 return; 2358 2359 xchg = arg; 2360 error = 0; 2361 scheme = (void *)(*xchg); 2362 2363 g_topology_assert(); 2364 2365 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2366 table = gp->softc; 2367 if (table->gpt_scheme != scheme) 2368 continue; 2369 2370 acc = 0; 2371 LIST_FOREACH(pp, &gp->provider, provider) 2372 acc += pp->acr + pp->acw + pp->ace; 2373 LIST_FOREACH(cp, &gp->consumer, consumer) 2374 acc += cp->acr + cp->acw + cp->ace; 2375 2376 if (!acc) 2377 g_part_wither(gp, ENOSYS); 2378 else 2379 error = EBUSY; 2380 } 2381 2382 if (!error) 2383 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2384 2385 *xchg = error; 2386 } 2387 2388 int 2389 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2390 { 2391 struct g_part_scheme *iter; 2392 uintptr_t arg; 2393 int error; 2394 2395 error = 0; 2396 switch (type) { 2397 case MOD_LOAD: 2398 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2399 if (scheme == iter) { 2400 printf("GEOM_PART: scheme %s is already " 2401 "registered!\n", scheme->name); 2402 break; 2403 } 2404 } 2405 if (iter == NULL) { 2406 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2407 scheme_list); 2408 g_retaste(&g_part_class); 2409 } 2410 break; 2411 case MOD_UNLOAD: 2412 arg = (uintptr_t)scheme; 2413 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2414 NULL); 2415 if (error == 0) 2416 error = arg; 2417 break; 2418 default: 2419 error = EOPNOTSUPP; 2420 break; 2421 } 2422 2423 return (error); 2424 } 2425