/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
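
/*
 * Translation table between symbolic partition type aliases and their
 * human-readable lexemes; g_part_alias_name() searches it linearly.
 */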
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-apfs",		G_PART_ALIAS_APPLE_APFS },
	{ "apple-boot",		G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage",	G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs",		G_PART_ALIAS_APPLE_HFS },
	{ "apple-label",	G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid",		G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline",	G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery",	G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs",		G_PART_ALIAS_APPLE_UFS },
	{ "apple-zfs",		G_PART_ALIAS_APPLE_ZFS },
	{ "bios-boot",		G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware",	G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel",	G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved",	G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root",	G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd",	G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer",	G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2",	G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32",	G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64",	G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy",	G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap",	G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs",	G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum",	G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr",		G_PART_ALIAS_EBR },
	{ "efi",		G_PART_ALIAS_EFI },
	{ "fat16",		G_PART_ALIAS_MS_FAT16 },
	{ "fat32",		G_PART_ALIAS_MS_FAT32 },
	{ "fat32lba",		G_PART_ALIAS_MS_FAT32LBA },
	{ "freebsd",		G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot",	G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs",	G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap",	G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs",	G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum",	G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs",	G_PART_ALIAS_FREEBSD_ZFS },
	{ "hifive-fsbl",	G_PART_ALIAS_HIFIVE_FSBL },
	{ "hifive-bbl",		G_PART_ALIAS_HIFIVE_BBL },
	{ "linux-data",		G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm",		G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid",		G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap",		G_PART_ALIAS_LINUX_SWAP },
	{ "mbr",		G_PART_ALIAS_MBR },
	{ "ms-basic-data",	G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data",	G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata",	G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery",	G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved",	G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces",		G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd",		G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd",		G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs",		G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs",		G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid",	G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap",	G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs",		G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data",	G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot",		G_PART_ALIAS_PREP_BOOT },
	{ "solaris-boot",	G_PART_ALIAS_SOLARIS_BOOT },
	{ "solaris-root",	G_PART_ALIAS_SOLARIS_ROOT },
	{ "solaris-swap",	G_PART_ALIAS_SOLARIS_SWAP },
	{ "solaris-backup",	G_PART_ALIAS_SOLARIS_BACKUP },
	{ "solaris-var",	G_PART_ALIAS_SOLARIS_VAR },
	{ "solaris-home",	G_PART_ALIAS_SOLARIS_HOME },
	{ "solaris-altsec",	G_PART_ALIAS_SOLARIS_ALTSEC },
	{ "solaris-reserved",	G_PART_ALIAS_SOLARIS_RESERVED },
	{ "vmware-reserved",	G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs",	G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag",	G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr",	G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_PART stuff");
u_int geom_part_check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &geom_part_check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");
static u_int allow_nesting = 0;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting,
    CTLFLAG_RWTUN, &allow_nesting, 0,
    "Allow additional levels of nesting");
char g_part_separator[MAXPATHLEN] = "";
SYSCTL_STRING(_kern_geom_part, OID_AUTO, separator,
    CTLFLAG_RDTUN, &g_part_separator, sizeof(g_part_separator),
    "Partition name separator");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}

void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}
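
/*
 * Synthesize a CHS geometry for the table.  Use the firmware-reported
 * geometry when the consumer supplies one; otherwise try a fixed set of
 * candidate sector counts and keep the heads/sectors pair that addresses
 * the most blocks without exceeding 1023 cylinders.
 */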
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

static void
g_part_get_physpath_done(struct bio *bp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;
	struct bio *pbp;

	pbp = bp->bio_parent;
	pp = pbp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	entry = pp->private;

	if (bp->bio_error == 0) {
		char *end;
		size_t len, remainder;
		len = strlcat(bp->bio_data, "/", bp->bio_length);
		if (len < bp->bio_length) {
			end = bp->bio_data + len;
			remainder = bp->bio_length - len;
			G_PART_NAME(table, entry, end, remainder);
		}
	}
	g_std_done(bp);
}

#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %ju bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    (uintmax_t)pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (geom_part_check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
#undef	DPRINTF

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
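
/*
 * Create or update the provider that exposes a table entry, inheriting
 * the parent provider's sector size, stripe geometry, aliases and
 * unmapped-I/O capability.
 */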
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom_alias *gap;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = G_PART_NEW_PROVIDER(table, gp, entry, gp->name);
		/*
		 * If our parent provider had any aliases, then copy them
		 * to our provider, so that when geom DEV tastes things
		 * later, it will create the aliases with those names used
		 * in place of the geom's name we used to create the
		 * provider. The kobj interface that generates names makes
		 * this awkward.
		 */
		LIST_FOREACH(gap, &pp->aliases, ga_next)
			G_PART_ADD_ALIAS(table, entry->gpe_pp, entry,
			    gap->ga_alias);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom *
g_part_find_geom(const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}

static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	const intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	const uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}

static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}
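
	/*
	 * Walk the entry list: remember a previously deleted entry with
	 * the requested index for reuse, track the entry to insert after,
	 * and bump the candidate index past indices already in use.
	 */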
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %ju bytes\n",
			    (uintmax_t)pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}

static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
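
/*
 * Handle the "create" verb: put a new partition table on a provider,
 * taking over from the placeholder null table when one was left behind
 * by an earlier "destroy".
 */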
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & G_F_FOOTSHOOTING) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	} else
		entry = NULL;

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;

	table = gp->softc;
	if (table != NULL) {
		gp->softc = NULL;
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			pp = entry->gpe_pp;
			entry->gpe_pp = NULL;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, error);
			}
			g_free(entry);
		}
		G_PART_DESTROY(table, NULL);
		kobj_delete((kobj_t)table, M_GEOM);
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */

static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
			oparms |= G_PART_PARM_SKIP_DSN;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			else if (!strcmp(ap->name, "skip_dsn"))
				parm = G_PART_PARM_SKIP_DSN;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_SKIP_DSN:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_skip_dsn);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER &&
		    geom_part_check_integrity) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */
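
	/*
	 * Dispatch to the per-verb handler.  Each handler reports failure
	 * details through gctl_error() on the request.
	 */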
*/ 1883 1884 switch (ctlreq) { 1885 case G_PART_CTL_NONE: 1886 panic("%s", __func__); 1887 case G_PART_CTL_ADD: 1888 error = g_part_ctl_add(req, &gpp); 1889 break; 1890 case G_PART_CTL_BOOTCODE: 1891 error = g_part_ctl_bootcode(req, &gpp); 1892 break; 1893 case G_PART_CTL_COMMIT: 1894 error = g_part_ctl_commit(req, &gpp); 1895 break; 1896 case G_PART_CTL_CREATE: 1897 error = g_part_ctl_create(req, &gpp); 1898 break; 1899 case G_PART_CTL_DELETE: 1900 error = g_part_ctl_delete(req, &gpp); 1901 break; 1902 case G_PART_CTL_DESTROY: 1903 error = g_part_ctl_destroy(req, &gpp); 1904 break; 1905 case G_PART_CTL_MODIFY: 1906 error = g_part_ctl_modify(req, &gpp); 1907 break; 1908 case G_PART_CTL_MOVE: 1909 error = g_part_ctl_move(req, &gpp); 1910 break; 1911 case G_PART_CTL_RECOVER: 1912 error = g_part_ctl_recover(req, &gpp); 1913 break; 1914 case G_PART_CTL_RESIZE: 1915 error = g_part_ctl_resize(req, &gpp); 1916 break; 1917 case G_PART_CTL_SET: 1918 error = g_part_ctl_setunset(req, &gpp, 1); 1919 break; 1920 case G_PART_CTL_UNDO: 1921 error = g_part_ctl_undo(req, &gpp); 1922 break; 1923 case G_PART_CTL_UNSET: 1924 error = g_part_ctl_setunset(req, &gpp, 0); 1925 break; 1926 } 1927 1928 /* Implement automatic commit. */ 1929 if (!error) { 1930 auto_commit = (modifies && 1931 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1932 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1933 if (auto_commit) { 1934 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1935 __func__)); 1936 error = g_part_ctl_commit(req, &gpp); 1937 } 1938 } 1939 1940 out: 1941 if (error && close_on_error) { 1942 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1943 table->gpt_opened = 0; 1944 } 1945 } 1946 1947 static int 1948 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1949 struct g_geom *gp) 1950 { 1951 1952 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1953 g_topology_assert(); 1954 1955 g_part_wither(gp, EINVAL); 1956 return (0); 1957 } 1958 1959 static struct g_geom * 1960 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1961 { 1962 struct g_consumer *cp; 1963 struct g_geom *gp; 1964 struct g_part_entry *entry; 1965 struct g_part_table *table; 1966 struct root_hold_token *rht; 1967 int attr, depth; 1968 int error; 1969 1970 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1971 g_topology_assert(); 1972 1973 /* Skip providers that are already open for writing. */ 1974 if (pp->acw > 0) 1975 return (NULL); 1976 1977 /* 1978 * Create a GEOM with consumer and hook it up to the provider. 1979 * With that we become part of the topology. Obtain read access 1980 * to the provider. 1981 */ 1982 gp = g_new_geomf(mp, "%s", pp->name); 1983 cp = g_new_consumer(gp); 1984 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1985 error = g_attach(cp, pp); 1986 if (error == 0) 1987 error = g_access(cp, 1, 0, 0); 1988 if (error != 0) { 1989 if (cp->provider) 1990 g_detach(cp); 1991 g_destroy_consumer(cp); 1992 g_destroy_geom(gp); 1993 return (NULL); 1994 } 1995 1996 rht = root_mount_hold(mp->name); 1997 g_topology_unlock(); 1998 1999 /* 2000 * Short-circuit the whole probing galore when there's no 2001 * media present. 2002 */ 2003 if (pp->mediasize == 0 || pp->sectorsize == 0) { 2004 error = ENODEV; 2005 goto fail; 2006 } 2007 2008 /* Make sure we can nest and if so, determine our depth. 
	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}

/*
 * Geom methods.
 */

static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {	/* Geom configuration. */
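		/*
		 * These elements land in the XML tree readable via the
		 * kern.geom.confxml sysctl; e.g. (illustrative) a GPT on
		 * a 512-byte-sector disk might dump <scheme>GPT</scheme>,
		 * <entries>128</entries> and <first>40</first>.
		 */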
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

/*-
 * This start routine is only called for non-trivial requests; all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call g_io_deliver() on the
 * bio and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, which means:
 * * No sleeping.
 * * Don't grab the topology lock.
 * * Don't call biowait(), g_getattr(), g_setattr() or g_read_data().
 */
static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
}

static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    "  Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}
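
/*
 * I/O translation sketch (illustrative): for a 4 KB read at offset 0 of
 * ada0p2 whose first sector is LBA 2048 on a 512-byte-sector disk,
 * g_part_start() below clones the bio and reissues it to the underlying
 * provider at byte offset 2048 * 512 = 1048576 (the entry's gpe_offset).
 */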
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	void (*done_func)(struct bio *) = g_std_done;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_SPEEDUP:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		/*
		 * allow_nesting overrides "isleaf" to false _unless_ the
		 * provider offset is zero, since otherwise we would recurse.
		 */
		if (g_handleattr_int(bp, "PART::isleaf",
		    table->gpt_isleaf &&
		    (allow_nesting == 0 || entry->gpe_offset == 0)))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::physpath", bp->bio_attribute)) {
			done_func = g_part_get_physpath_done;
			break;
		}
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
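			 * The dump window below is additionally clipped to
			 * the partition and rebased by gpe_offset, mirroring
			 * the regular read/write path above.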
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = done_func;
	g_io_request(bp2, cp);
}

static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}

int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		if (error == 0)
			error = arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
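
/*
 * Partitioning schemes register themselves through g_part_modevent();
 * a scheme module typically hooks in with the G_PART_SCHEME_DECLARE()
 * macro from g_part.h, e.g. (sketch, with a hypothetical scheme):
 *
 *	static struct g_part_scheme g_part_foo_scheme = { ... };
 *	G_PART_SCHEME_DECLARE(g_part_foo);
 */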