1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/param.h> 30 #include <sys/bio.h> 31 #include <sys/endian.h> 32 #include <sys/kernel.h> 33 #include <sys/kobj.h> 34 #include <sys/limits.h> 35 #include <sys/lock.h> 36 #include <sys/malloc.h> 37 #include <sys/mutex.h> 38 #include <sys/queue.h> 39 #include <sys/sbuf.h> 40 #include <sys/sysctl.h> 41 #include <sys/systm.h> 42 #include <sys/uuid.h> 43 #include <geom/geom.h> 44 #include <geom/geom_ctl.h> 45 #include <geom/geom_int.h> 46 #include <geom/part/g_part.h> 47 48 #include "g_part_if.h" 49 50 static kobj_method_t g_part_null_methods[] = { 51 { 0, 0 } 52 }; 53 54 static struct g_part_scheme g_part_null_scheme = { 55 "(none)", 56 g_part_null_methods, 57 sizeof(struct g_part_table), 58 }; 59 60 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 61 TAILQ_HEAD_INITIALIZER(g_part_schemes); 62 63 struct g_part_alias_list { 64 const char *lexeme; 65 enum g_part_alias alias; 66 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 67 { "apple-apfs", G_PART_ALIAS_APPLE_APFS }, 68 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 69 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE }, 70 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 71 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 72 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 73 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 74 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 75 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 76 { "apple-zfs", G_PART_ALIAS_APPLE_ZFS }, 77 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 78 { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE }, 79 { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL }, 80 { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED }, 81 { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT }, 82 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD }, 83 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER }, 84 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 }, 85 { "dragonfly-label32", G_PART_ALIAS_DFBSD }, 86 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 }, 87 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY }, 88 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP }, 89 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS }, 90 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM }, 91 { "ebr", G_PART_ALIAS_EBR }, 92 { "efi", G_PART_ALIAS_EFI }, 93 { "fat16", G_PART_ALIAS_MS_FAT16 }, 94 { "fat32", G_PART_ALIAS_MS_FAT32 }, 95 { "fat32lba", G_PART_ALIAS_MS_FAT32LBA }, 96 { "freebsd", G_PART_ALIAS_FREEBSD }, 97 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 98 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS }, 99 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 100 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 101 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 102 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 103 { "hifive-fsbl", G_PART_ALIAS_HIFIVE_FSBL }, 104 { "hifive-bbl", G_PART_ALIAS_HIFIVE_BBL }, 105 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 106 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 107 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 108 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 109 { "mbr", G_PART_ALIAS_MBR }, 110 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 111 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 112 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 113 { "ms-recovery", G_PART_ALIAS_MS_RECOVERY }, 114 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 115 { "ms-spaces", G_PART_ALIAS_MS_SPACES }, 116 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 117 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 118 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 119 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 120 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 121 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 122 { "ntfs", G_PART_ALIAS_MS_NTFS }, 123 { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA }, 124 { "prep-boot", G_PART_ALIAS_PREP_BOOT }, 125 { "solaris-boot", G_PART_ALIAS_SOLARIS_BOOT }, 126 { "solaris-root", G_PART_ALIAS_SOLARIS_ROOT }, 127 { "solaris-swap", G_PART_ALIAS_SOLARIS_SWAP }, 128 { "solaris-backup", G_PART_ALIAS_SOLARIS_BACKUP }, 129 { "solaris-var", G_PART_ALIAS_SOLARIS_VAR }, 130 { "solaris-home", G_PART_ALIAS_SOLARIS_HOME }, 131 { "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC }, 132 { "solaris-reserved", G_PART_ALIAS_SOLARIS_RESERVED }, 133 { "vmware-reserved", G_PART_ALIAS_VMRESERVED }, 134 { "vmware-vmfs", G_PART_ALIAS_VMFS }, 135 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG }, 136 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR }, 137 }; 138 139 SYSCTL_DECL(_kern_geom); 140 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 141 "GEOM_PART stuff"); 142 u_int geom_part_check_integrity = 1; 143 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 144 CTLFLAG_RWTUN, &geom_part_check_integrity, 1, 145 "Enable integrity checking"); 146 static u_int auto_resize = 1; 147 SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize, 148 CTLFLAG_RWTUN, &auto_resize, 1, 149 "Enable auto resize"); 150 static u_int allow_nesting = 0; 151 SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting, 152 CTLFLAG_RWTUN, &allow_nesting, 0, 153 "Allow additional levels of nesting"); 154 char g_part_separator[MAXPATHLEN] = ""; 155 SYSCTL_STRING(_kern_geom_part, OID_AUTO, separator, 156 CTLFLAG_RDTUN, &g_part_separator, sizeof(g_part_separator), 157 "Partition name separator"); 158 159 /* 160 * The GEOM partitioning class. 161 */ 162 static g_ctl_req_t g_part_ctlreq; 163 static g_ctl_destroy_geom_t g_part_destroy_geom; 164 static g_fini_t g_part_fini; 165 static g_init_t g_part_init; 166 static g_taste_t g_part_taste; 167 168 static g_access_t g_part_access; 169 static g_dumpconf_t g_part_dumpconf; 170 static g_orphan_t g_part_orphan; 171 static g_spoiled_t g_part_spoiled; 172 static g_start_t g_part_start; 173 static g_resize_t g_part_resize; 174 static g_ioctl_t g_part_ioctl; 175 176 static struct g_class g_part_class = { 177 .name = "PART", 178 .version = G_VERSION, 179 /* Class methods. */ 180 .ctlreq = g_part_ctlreq, 181 .destroy_geom = g_part_destroy_geom, 182 .fini = g_part_fini, 183 .init = g_part_init, 184 .taste = g_part_taste, 185 /* Geom methods. */ 186 .access = g_part_access, 187 .dumpconf = g_part_dumpconf, 188 .orphan = g_part_orphan, 189 .spoiled = g_part_spoiled, 190 .start = g_part_start, 191 .resize = g_part_resize, 192 .ioctl = g_part_ioctl, 193 }; 194 195 DECLARE_GEOM_CLASS(g_part_class, g_part); 196 MODULE_VERSION(g_part, 0); 197 198 /* 199 * Support functions. 200 */ 201 202 static void g_part_wither(struct g_geom *, int); 203 204 const char * 205 g_part_alias_name(enum g_part_alias alias) 206 { 207 int i; 208 209 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 210 if (g_part_alias_list[i].alias != alias) 211 continue; 212 return (g_part_alias_list[i].lexeme); 213 } 214 215 return (NULL); 216 } 217 218 void 219 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 220 u_int *bestheads) 221 { 222 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 223 off_t chs, cylinders; 224 u_int heads; 225 int idx; 226 227 *bestchs = 0; 228 *bestheads = 0; 229 for (idx = 0; candidate_heads[idx] != 0; idx++) { 230 heads = candidate_heads[idx]; 231 cylinders = blocks / heads / sectors; 232 if (cylinders < heads || cylinders < sectors) 233 break; 234 if (cylinders > 1023) 235 continue; 236 chs = cylinders * heads * sectors; 237 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 238 *bestchs = chs; 239 *bestheads = heads; 240 } 241 } 242 } 243 244 static void 245 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 246 off_t blocks) 247 { 248 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 249 off_t chs, bestchs; 250 u_int heads, sectors; 251 int idx; 252 253 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 254 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 255 table->gpt_fixgeom = 0; 256 table->gpt_heads = 0; 257 table->gpt_sectors = 0; 258 bestchs = 0; 259 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 260 sectors = candidate_sectors[idx]; 261 g_part_geometry_heads(blocks, sectors, &chs, &heads); 262 if (chs == 0) 263 continue; 264 /* 265 * Prefer a geometry with sectors > 1, but only if 266 * it doesn't bump down the number of heads to 1. 267 */ 268 if (chs > bestchs || (chs == bestchs && heads > 1 && 269 table->gpt_sectors == 1)) { 270 bestchs = chs; 271 table->gpt_heads = heads; 272 table->gpt_sectors = sectors; 273 } 274 } 275 /* 276 * If we didn't find a geometry at all, then the disk is 277 * too big. This means we can use the maximum number of 278 * heads and sectors. 279 */ 280 if (bestchs == 0) { 281 table->gpt_heads = 255; 282 table->gpt_sectors = 63; 283 } 284 } else { 285 table->gpt_fixgeom = 1; 286 table->gpt_heads = heads; 287 table->gpt_sectors = sectors; 288 } 289 } 290 291 static void 292 g_part_get_physpath_done(struct bio *bp) 293 { 294 struct g_geom *gp; 295 struct g_part_entry *entry; 296 struct g_part_table *table; 297 struct g_provider *pp; 298 struct bio *pbp; 299 300 pbp = bp->bio_parent; 301 pp = pbp->bio_to; 302 gp = pp->geom; 303 table = gp->softc; 304 entry = pp->private; 305 306 if (bp->bio_error == 0) { 307 char *end; 308 size_t len, remainder; 309 len = strlcat(bp->bio_data, "/", bp->bio_length); 310 if (len < bp->bio_length) { 311 end = bp->bio_data + len; 312 remainder = bp->bio_length - len; 313 G_PART_NAME(table, entry, end, remainder); 314 } 315 } 316 g_std_done(bp); 317 } 318 319 #define DPRINTF(...) if (bootverbose) { \ 320 printf("GEOM_PART: " __VA_ARGS__); \ 321 } 322 323 static int 324 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 325 { 326 struct g_part_entry *e1, *e2; 327 struct g_provider *pp; 328 off_t offset; 329 int failed; 330 331 failed = 0; 332 pp = cp->provider; 333 if (table->gpt_last < table->gpt_first) { 334 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 335 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 336 failed++; 337 } 338 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 339 DPRINTF("last LBA extends beyond mediasize: " 340 "%jd > %jd\n", (intmax_t)table->gpt_last, 341 (intmax_t)pp->mediasize / pp->sectorsize - 1); 342 failed++; 343 } 344 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 345 if (e1->gpe_deleted || e1->gpe_internal) 346 continue; 347 if (e1->gpe_start < table->gpt_first) { 348 DPRINTF("partition %d has start offset below first " 349 "LBA: %jd < %jd\n", e1->gpe_index, 350 (intmax_t)e1->gpe_start, 351 (intmax_t)table->gpt_first); 352 failed++; 353 } 354 if (e1->gpe_start > table->gpt_last) { 355 DPRINTF("partition %d has start offset beyond last " 356 "LBA: %jd > %jd\n", e1->gpe_index, 357 (intmax_t)e1->gpe_start, 358 (intmax_t)table->gpt_last); 359 failed++; 360 } 361 if (e1->gpe_end < e1->gpe_start) { 362 DPRINTF("partition %d has end offset below start " 363 "offset: %jd < %jd\n", e1->gpe_index, 364 (intmax_t)e1->gpe_end, 365 (intmax_t)e1->gpe_start); 366 failed++; 367 } 368 if (e1->gpe_end > table->gpt_last) { 369 DPRINTF("partition %d has end offset beyond last " 370 "LBA: %jd > %jd\n", e1->gpe_index, 371 (intmax_t)e1->gpe_end, 372 (intmax_t)table->gpt_last); 373 failed++; 374 } 375 if (pp->stripesize > 0) { 376 offset = e1->gpe_start * pp->sectorsize; 377 if (e1->gpe_offset > offset) 378 offset = e1->gpe_offset; 379 if ((offset + pp->stripeoffset) % pp->stripesize) { 380 DPRINTF("partition %d on (%s, %s) is not " 381 "aligned on %ju bytes\n", e1->gpe_index, 382 pp->name, table->gpt_scheme->name, 383 (uintmax_t)pp->stripesize); 384 /* Don't treat this as a critical failure */ 385 } 386 } 387 e2 = e1; 388 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 389 if (e2->gpe_deleted || e2->gpe_internal) 390 continue; 391 if (e1->gpe_start >= e2->gpe_start && 392 e1->gpe_start <= e2->gpe_end) { 393 DPRINTF("partition %d has start offset inside " 394 "partition %d: start[%d] %jd >= start[%d] " 395 "%jd <= end[%d] %jd\n", 396 e1->gpe_index, e2->gpe_index, 397 e2->gpe_index, (intmax_t)e2->gpe_start, 398 e1->gpe_index, (intmax_t)e1->gpe_start, 399 e2->gpe_index, (intmax_t)e2->gpe_end); 400 failed++; 401 } 402 if (e1->gpe_end >= e2->gpe_start && 403 e1->gpe_end <= e2->gpe_end) { 404 DPRINTF("partition %d has end offset inside " 405 "partition %d: start[%d] %jd >= end[%d] " 406 "%jd <= end[%d] %jd\n", 407 e1->gpe_index, e2->gpe_index, 408 e2->gpe_index, (intmax_t)e2->gpe_start, 409 e1->gpe_index, (intmax_t)e1->gpe_end, 410 e2->gpe_index, (intmax_t)e2->gpe_end); 411 failed++; 412 } 413 if (e1->gpe_start < e2->gpe_start && 414 e1->gpe_end > e2->gpe_end) { 415 DPRINTF("partition %d contains partition %d: " 416 "start[%d] %jd > start[%d] %jd, end[%d] " 417 "%jd < end[%d] %jd\n", 418 e1->gpe_index, e2->gpe_index, 419 e1->gpe_index, (intmax_t)e1->gpe_start, 420 e2->gpe_index, (intmax_t)e2->gpe_start, 421 e2->gpe_index, (intmax_t)e2->gpe_end, 422 e1->gpe_index, (intmax_t)e1->gpe_end); 423 failed++; 424 } 425 } 426 } 427 if (failed != 0) { 428 printf("GEOM_PART: integrity check failed (%s, %s)\n", 429 pp->name, table->gpt_scheme->name); 430 if (geom_part_check_integrity != 0) 431 return (EINVAL); 432 table->gpt_corrupt = 1; 433 } 434 return (0); 435 } 436 #undef DPRINTF 437 438 struct g_part_entry * 439 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 440 quad_t end) 441 { 442 struct g_part_entry *entry, *last; 443 444 last = NULL; 445 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 446 if (entry->gpe_index == index) 447 break; 448 if (entry->gpe_index > index) { 449 entry = NULL; 450 break; 451 } 452 last = entry; 453 } 454 if (entry == NULL) { 455 entry = g_malloc(table->gpt_scheme->gps_entrysz, 456 M_WAITOK | M_ZERO); 457 entry->gpe_index = index; 458 if (last == NULL) 459 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 460 else 461 LIST_INSERT_AFTER(last, entry, gpe_entry); 462 } else 463 entry->gpe_offset = 0; 464 entry->gpe_start = start; 465 entry->gpe_end = end; 466 return (entry); 467 } 468 469 static void 470 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 471 struct g_part_entry *entry) 472 { 473 struct g_consumer *cp; 474 struct g_provider *pp; 475 struct g_geom_alias *gap; 476 off_t offset; 477 478 cp = LIST_FIRST(&gp->consumer); 479 pp = cp->provider; 480 481 offset = entry->gpe_start * pp->sectorsize; 482 if (entry->gpe_offset < offset) 483 entry->gpe_offset = offset; 484 485 if (entry->gpe_pp == NULL) { 486 entry->gpe_pp = G_PART_NEW_PROVIDER(table, gp, entry, gp->name); 487 /* 488 * If our parent provider had any aliases, then copy them to our 489 * provider so when geom DEV tastes things later, they will be 490 * there for it to create the aliases with those name used in 491 * place of the geom's name we use to create the provider. The 492 * kobj interface that generates names makes this awkward. 493 */ 494 LIST_FOREACH(gap, &pp->aliases, ga_next) 495 G_PART_ADD_ALIAS(table, entry->gpe_pp, entry, gap->ga_alias); 496 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 497 entry->gpe_pp->private = entry; /* Close the circle. */ 498 } 499 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 500 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 501 pp->sectorsize; 502 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 503 entry->gpe_pp->sectorsize = pp->sectorsize; 504 entry->gpe_pp->stripesize = pp->stripesize; 505 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 506 if (pp->stripesize > 0) 507 entry->gpe_pp->stripeoffset %= pp->stripesize; 508 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED; 509 g_error_provider(entry->gpe_pp, 0); 510 } 511 512 static struct g_geom* 513 g_part_find_geom(const char *name) 514 { 515 struct g_geom *gp; 516 LIST_FOREACH(gp, &g_part_class.geom, geom) { 517 if ((gp->flags & G_GEOM_WITHER) == 0 && 518 strcmp(name, gp->name) == 0) 519 break; 520 } 521 return (gp); 522 } 523 524 static int 525 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 526 { 527 struct g_geom *gp; 528 const char *gname; 529 530 gname = gctl_get_asciiparam(req, name); 531 if (gname == NULL) 532 return (ENOATTR); 533 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 534 gname += sizeof(_PATH_DEV) - 1; 535 gp = g_part_find_geom(gname); 536 if (gp == NULL) { 537 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 538 return (EINVAL); 539 } 540 *v = gp; 541 return (0); 542 } 543 544 static int 545 g_part_parm_provider(struct gctl_req *req, const char *name, 546 struct g_provider **v) 547 { 548 struct g_provider *pp; 549 const char *pname; 550 551 pname = gctl_get_asciiparam(req, name); 552 if (pname == NULL) 553 return (ENOATTR); 554 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 555 pname += sizeof(_PATH_DEV) - 1; 556 pp = g_provider_by_name(pname); 557 if (pp == NULL) { 558 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 559 return (EINVAL); 560 } 561 *v = pp; 562 return (0); 563 } 564 565 static int 566 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 567 { 568 const char *p; 569 char *x; 570 quad_t q; 571 572 p = gctl_get_asciiparam(req, name); 573 if (p == NULL) 574 return (ENOATTR); 575 q = strtoq(p, &x, 0); 576 if (*x != '\0' || q < 0) { 577 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 578 return (EINVAL); 579 } 580 *v = q; 581 return (0); 582 } 583 584 static int 585 g_part_parm_scheme(struct gctl_req *req, const char *name, 586 struct g_part_scheme **v) 587 { 588 struct g_part_scheme *s; 589 const char *p; 590 591 p = gctl_get_asciiparam(req, name); 592 if (p == NULL) 593 return (ENOATTR); 594 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 595 if (s == &g_part_null_scheme) 596 continue; 597 if (!strcasecmp(s->name, p)) 598 break; 599 } 600 if (s == NULL) { 601 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 602 return (EINVAL); 603 } 604 *v = s; 605 return (0); 606 } 607 608 static int 609 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 610 { 611 const char *p; 612 613 p = gctl_get_asciiparam(req, name); 614 if (p == NULL) 615 return (ENOATTR); 616 /* An empty label is always valid. */ 617 if (strcmp(name, "label") != 0 && p[0] == '\0') { 618 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 619 return (EINVAL); 620 } 621 *v = p; 622 return (0); 623 } 624 625 static int 626 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 627 { 628 const intmax_t *p; 629 int size; 630 631 p = gctl_get_param(req, name, &size); 632 if (p == NULL) 633 return (ENOATTR); 634 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 635 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 636 return (EINVAL); 637 } 638 *v = (u_int)*p; 639 return (0); 640 } 641 642 static int 643 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 644 { 645 const uint32_t *p; 646 int size; 647 648 p = gctl_get_param(req, name, &size); 649 if (p == NULL) 650 return (ENOATTR); 651 if (size != sizeof(*p) || *p > INT_MAX) { 652 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 653 return (EINVAL); 654 } 655 *v = (u_int)*p; 656 return (0); 657 } 658 659 static int 660 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 661 unsigned int *s) 662 { 663 const void *p; 664 int size; 665 666 p = gctl_get_param(req, name, &size); 667 if (p == NULL) 668 return (ENOATTR); 669 *v = p; 670 *s = size; 671 return (0); 672 } 673 674 static int 675 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 676 { 677 struct g_part_scheme *iter, *scheme; 678 struct g_part_table *table; 679 int pri, probe; 680 681 table = gp->softc; 682 scheme = (table != NULL) ? table->gpt_scheme : NULL; 683 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 684 if (pri == 0) 685 goto done; 686 if (pri > 0) { /* error */ 687 scheme = NULL; 688 pri = INT_MIN; 689 } 690 691 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 692 if (iter == &g_part_null_scheme) 693 continue; 694 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 695 M_WAITOK); 696 table->gpt_gp = gp; 697 table->gpt_scheme = iter; 698 table->gpt_depth = depth; 699 probe = G_PART_PROBE(table, cp); 700 if (probe <= 0 && probe > pri) { 701 pri = probe; 702 scheme = iter; 703 if (gp->softc != NULL) 704 kobj_delete((kobj_t)gp->softc, M_GEOM); 705 gp->softc = table; 706 if (pri == 0) 707 goto done; 708 } else 709 kobj_delete((kobj_t)table, M_GEOM); 710 } 711 712 done: 713 return ((scheme == NULL) ? ENXIO : 0); 714 } 715 716 /* 717 * Control request functions. 718 */ 719 720 static int 721 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 722 { 723 struct g_geom *gp; 724 struct g_provider *pp; 725 struct g_part_entry *delent, *last, *entry; 726 struct g_part_table *table; 727 struct sbuf *sb; 728 quad_t end; 729 unsigned int index; 730 int error; 731 732 gp = gpp->gpp_geom; 733 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 734 g_topology_assert(); 735 736 pp = LIST_FIRST(&gp->consumer)->provider; 737 table = gp->softc; 738 end = gpp->gpp_start + gpp->gpp_size - 1; 739 740 if (gpp->gpp_start < table->gpt_first || 741 gpp->gpp_start > table->gpt_last) { 742 gctl_error(req, "%d start '%jd'", EINVAL, 743 (intmax_t)gpp->gpp_start); 744 return (EINVAL); 745 } 746 if (end < gpp->gpp_start || end > table->gpt_last) { 747 gctl_error(req, "%d size '%jd'", EINVAL, 748 (intmax_t)gpp->gpp_size); 749 return (EINVAL); 750 } 751 if (gpp->gpp_index > table->gpt_entries) { 752 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 753 return (EINVAL); 754 } 755 756 delent = last = NULL; 757 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 758 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 759 if (entry->gpe_deleted) { 760 if (entry->gpe_index == index) 761 delent = entry; 762 continue; 763 } 764 if (entry->gpe_index == index) 765 index = entry->gpe_index + 1; 766 if (entry->gpe_index < index) 767 last = entry; 768 if (entry->gpe_internal) 769 continue; 770 if (gpp->gpp_start >= entry->gpe_start && 771 gpp->gpp_start <= entry->gpe_end) { 772 gctl_error(req, "%d start '%jd'", ENOSPC, 773 (intmax_t)gpp->gpp_start); 774 return (ENOSPC); 775 } 776 if (end >= entry->gpe_start && end <= entry->gpe_end) { 777 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 778 return (ENOSPC); 779 } 780 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 781 gctl_error(req, "%d size '%jd'", ENOSPC, 782 (intmax_t)gpp->gpp_size); 783 return (ENOSPC); 784 } 785 } 786 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 787 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 788 return (EEXIST); 789 } 790 if (index > table->gpt_entries) { 791 gctl_error(req, "%d index '%d'", ENOSPC, index); 792 return (ENOSPC); 793 } 794 795 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 796 M_WAITOK | M_ZERO) : delent; 797 entry->gpe_index = index; 798 entry->gpe_start = gpp->gpp_start; 799 entry->gpe_end = end; 800 error = G_PART_ADD(table, entry, gpp); 801 if (error) { 802 gctl_error(req, "%d", error); 803 if (delent == NULL) 804 g_free(entry); 805 return (error); 806 } 807 if (delent == NULL) { 808 if (last == NULL) 809 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 810 else 811 LIST_INSERT_AFTER(last, entry, gpe_entry); 812 entry->gpe_created = 1; 813 } else { 814 entry->gpe_deleted = 0; 815 entry->gpe_modified = 1; 816 } 817 g_part_new_provider(gp, table, entry); 818 819 /* Provide feedback if so requested. */ 820 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 821 sb = sbuf_new_auto(); 822 G_PART_FULLNAME(table, entry, sb, gp->name); 823 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 824 sbuf_printf(sb, " added, but partition is not " 825 "aligned on %ju bytes\n", (uintmax_t)pp->stripesize); 826 else 827 sbuf_cat(sb, " added\n"); 828 sbuf_finish(sb); 829 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 830 sbuf_delete(sb); 831 } 832 return (0); 833 } 834 835 static int 836 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 837 { 838 struct g_geom *gp; 839 struct g_part_table *table; 840 struct sbuf *sb; 841 int error, sz; 842 843 gp = gpp->gpp_geom; 844 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 845 g_topology_assert(); 846 847 table = gp->softc; 848 sz = table->gpt_scheme->gps_bootcodesz; 849 if (sz == 0) { 850 error = ENODEV; 851 goto fail; 852 } 853 if (gpp->gpp_codesize > sz) { 854 error = EFBIG; 855 goto fail; 856 } 857 858 error = G_PART_BOOTCODE(table, gpp); 859 if (error) 860 goto fail; 861 862 /* Provide feedback if so requested. */ 863 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 864 sb = sbuf_new_auto(); 865 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 866 sbuf_finish(sb); 867 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 868 sbuf_delete(sb); 869 } 870 return (0); 871 872 fail: 873 gctl_error(req, "%d", error); 874 return (error); 875 } 876 877 static int 878 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 879 { 880 struct g_consumer *cp; 881 struct g_geom *gp; 882 struct g_provider *pp; 883 struct g_part_entry *entry, *tmp; 884 struct g_part_table *table; 885 char *buf; 886 int error, i; 887 888 gp = gpp->gpp_geom; 889 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 890 g_topology_assert(); 891 892 table = gp->softc; 893 if (!table->gpt_opened) { 894 gctl_error(req, "%d", EPERM); 895 return (EPERM); 896 } 897 898 g_topology_unlock(); 899 900 cp = LIST_FIRST(&gp->consumer); 901 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 902 pp = cp->provider; 903 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 904 while (table->gpt_smhead != 0) { 905 i = ffs(table->gpt_smhead) - 1; 906 error = g_write_data(cp, i * pp->sectorsize, buf, 907 pp->sectorsize); 908 if (error) { 909 g_free(buf); 910 goto fail; 911 } 912 table->gpt_smhead &= ~(1 << i); 913 } 914 while (table->gpt_smtail != 0) { 915 i = ffs(table->gpt_smtail) - 1; 916 error = g_write_data(cp, pp->mediasize - (i + 1) * 917 pp->sectorsize, buf, pp->sectorsize); 918 if (error) { 919 g_free(buf); 920 goto fail; 921 } 922 table->gpt_smtail &= ~(1 << i); 923 } 924 g_free(buf); 925 } 926 927 if (table->gpt_scheme == &g_part_null_scheme) { 928 g_topology_lock(); 929 g_access(cp, -1, -1, -1); 930 g_part_wither(gp, ENXIO); 931 return (0); 932 } 933 934 error = G_PART_WRITE(table, cp); 935 if (error) 936 goto fail; 937 938 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 939 if (!entry->gpe_deleted) { 940 /* Notify consumers that provider might be changed. */ 941 if (entry->gpe_modified && ( 942 entry->gpe_pp->acw + entry->gpe_pp->ace + 943 entry->gpe_pp->acr) == 0) 944 g_media_changed(entry->gpe_pp, M_NOWAIT); 945 entry->gpe_created = 0; 946 entry->gpe_modified = 0; 947 continue; 948 } 949 LIST_REMOVE(entry, gpe_entry); 950 g_free(entry); 951 } 952 table->gpt_created = 0; 953 table->gpt_opened = 0; 954 955 g_topology_lock(); 956 g_access(cp, -1, -1, -1); 957 return (0); 958 959 fail: 960 g_topology_lock(); 961 gctl_error(req, "%d", error); 962 return (error); 963 } 964 965 static int 966 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 967 { 968 struct g_consumer *cp; 969 struct g_geom *gp; 970 struct g_provider *pp; 971 struct g_part_scheme *scheme; 972 struct g_part_table *null, *table; 973 struct sbuf *sb; 974 int attr, error; 975 976 pp = gpp->gpp_provider; 977 scheme = gpp->gpp_scheme; 978 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 979 g_topology_assert(); 980 981 /* Check that there isn't already a g_part geom on the provider. */ 982 gp = g_part_find_geom(pp->name); 983 if (gp != NULL) { 984 null = gp->softc; 985 if (null->gpt_scheme != &g_part_null_scheme) { 986 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 987 return (EEXIST); 988 } 989 } else 990 null = NULL; 991 992 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 993 (gpp->gpp_entries < scheme->gps_minent || 994 gpp->gpp_entries > scheme->gps_maxent)) { 995 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 996 return (EINVAL); 997 } 998 999 if (null == NULL) 1000 gp = g_new_geomf(&g_part_class, "%s", pp->name); 1001 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 1002 M_WAITOK); 1003 table = gp->softc; 1004 table->gpt_gp = gp; 1005 table->gpt_scheme = gpp->gpp_scheme; 1006 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 1007 gpp->gpp_entries : scheme->gps_minent; 1008 LIST_INIT(&table->gpt_entry); 1009 if (null == NULL) { 1010 cp = g_new_consumer(gp); 1011 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1012 error = g_attach(cp, pp); 1013 if (error == 0) 1014 error = g_access(cp, 1, 1, 1); 1015 if (error != 0) { 1016 g_part_wither(gp, error); 1017 gctl_error(req, "%d geom '%s'", error, pp->name); 1018 return (error); 1019 } 1020 table->gpt_opened = 1; 1021 } else { 1022 cp = LIST_FIRST(&gp->consumer); 1023 table->gpt_opened = null->gpt_opened; 1024 table->gpt_smhead = null->gpt_smhead; 1025 table->gpt_smtail = null->gpt_smtail; 1026 } 1027 1028 g_topology_unlock(); 1029 1030 /* Make sure the provider has media. */ 1031 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1032 error = ENODEV; 1033 goto fail; 1034 } 1035 1036 /* Make sure we can nest and if so, determine our depth. */ 1037 error = g_getattr("PART::isleaf", cp, &attr); 1038 if (!error && attr) { 1039 error = ENODEV; 1040 goto fail; 1041 } 1042 error = g_getattr("PART::depth", cp, &attr); 1043 table->gpt_depth = (!error) ? attr + 1 : 0; 1044 1045 /* 1046 * Synthesize a disk geometry. Some partitioning schemes 1047 * depend on it and since some file systems need it even 1048 * when the partitition scheme doesn't, we do it here in 1049 * scheme-independent code. 1050 */ 1051 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1052 1053 error = G_PART_CREATE(table, gpp); 1054 if (error) 1055 goto fail; 1056 1057 g_topology_lock(); 1058 1059 table->gpt_created = 1; 1060 if (null != NULL) 1061 kobj_delete((kobj_t)null, M_GEOM); 1062 1063 /* 1064 * Support automatic commit by filling in the gpp_geom 1065 * parameter. 1066 */ 1067 gpp->gpp_parms |= G_PART_PARM_GEOM; 1068 gpp->gpp_geom = gp; 1069 1070 /* Provide feedback if so requested. */ 1071 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1072 sb = sbuf_new_auto(); 1073 sbuf_printf(sb, "%s created\n", gp->name); 1074 sbuf_finish(sb); 1075 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1076 sbuf_delete(sb); 1077 } 1078 return (0); 1079 1080 fail: 1081 g_topology_lock(); 1082 if (null == NULL) { 1083 g_access(cp, -1, -1, -1); 1084 g_part_wither(gp, error); 1085 } else { 1086 kobj_delete((kobj_t)gp->softc, M_GEOM); 1087 gp->softc = null; 1088 } 1089 gctl_error(req, "%d provider", error); 1090 return (error); 1091 } 1092 1093 static int 1094 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1095 { 1096 struct g_geom *gp; 1097 struct g_provider *pp; 1098 struct g_part_entry *entry; 1099 struct g_part_table *table; 1100 struct sbuf *sb; 1101 1102 gp = gpp->gpp_geom; 1103 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1104 g_topology_assert(); 1105 1106 table = gp->softc; 1107 1108 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1109 if (entry->gpe_deleted || entry->gpe_internal) 1110 continue; 1111 if (entry->gpe_index == gpp->gpp_index) 1112 break; 1113 } 1114 if (entry == NULL) { 1115 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1116 return (ENOENT); 1117 } 1118 1119 pp = entry->gpe_pp; 1120 if (pp != NULL) { 1121 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1122 gctl_error(req, "%d", EBUSY); 1123 return (EBUSY); 1124 } 1125 1126 pp->private = NULL; 1127 entry->gpe_pp = NULL; 1128 } 1129 1130 if (pp != NULL) 1131 g_wither_provider(pp, ENXIO); 1132 1133 /* Provide feedback if so requested. */ 1134 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1135 sb = sbuf_new_auto(); 1136 G_PART_FULLNAME(table, entry, sb, gp->name); 1137 sbuf_cat(sb, " deleted\n"); 1138 sbuf_finish(sb); 1139 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1140 sbuf_delete(sb); 1141 } 1142 1143 if (entry->gpe_created) { 1144 LIST_REMOVE(entry, gpe_entry); 1145 g_free(entry); 1146 } else { 1147 entry->gpe_modified = 0; 1148 entry->gpe_deleted = 1; 1149 } 1150 return (0); 1151 } 1152 1153 static int 1154 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1155 { 1156 struct g_consumer *cp; 1157 struct g_geom *gp; 1158 struct g_provider *pp; 1159 struct g_part_entry *entry, *tmp; 1160 struct g_part_table *null, *table; 1161 struct sbuf *sb; 1162 int error; 1163 1164 gp = gpp->gpp_geom; 1165 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1166 g_topology_assert(); 1167 1168 table = gp->softc; 1169 /* Check for busy providers. */ 1170 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1171 if (entry->gpe_deleted || entry->gpe_internal) 1172 continue; 1173 if (gpp->gpp_force) { 1174 pp = entry->gpe_pp; 1175 if (pp == NULL) 1176 continue; 1177 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1178 continue; 1179 } 1180 gctl_error(req, "%d", EBUSY); 1181 return (EBUSY); 1182 } 1183 1184 if (gpp->gpp_force) { 1185 /* Destroy all providers. */ 1186 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1187 pp = entry->gpe_pp; 1188 if (pp != NULL) { 1189 pp->private = NULL; 1190 g_wither_provider(pp, ENXIO); 1191 } 1192 LIST_REMOVE(entry, gpe_entry); 1193 g_free(entry); 1194 } 1195 } 1196 1197 error = G_PART_DESTROY(table, gpp); 1198 if (error) { 1199 gctl_error(req, "%d", error); 1200 return (error); 1201 } 1202 1203 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1204 M_WAITOK); 1205 null = gp->softc; 1206 null->gpt_gp = gp; 1207 null->gpt_scheme = &g_part_null_scheme; 1208 LIST_INIT(&null->gpt_entry); 1209 1210 cp = LIST_FIRST(&gp->consumer); 1211 pp = cp->provider; 1212 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1213 1214 null->gpt_depth = table->gpt_depth; 1215 null->gpt_opened = table->gpt_opened; 1216 null->gpt_smhead = table->gpt_smhead; 1217 null->gpt_smtail = table->gpt_smtail; 1218 1219 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1220 LIST_REMOVE(entry, gpe_entry); 1221 g_free(entry); 1222 } 1223 kobj_delete((kobj_t)table, M_GEOM); 1224 1225 /* Provide feedback if so requested. */ 1226 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1227 sb = sbuf_new_auto(); 1228 sbuf_printf(sb, "%s destroyed\n", gp->name); 1229 sbuf_finish(sb); 1230 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1231 sbuf_delete(sb); 1232 } 1233 return (0); 1234 } 1235 1236 static int 1237 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1238 { 1239 struct g_geom *gp; 1240 struct g_part_entry *entry; 1241 struct g_part_table *table; 1242 struct sbuf *sb; 1243 int error; 1244 1245 gp = gpp->gpp_geom; 1246 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1247 g_topology_assert(); 1248 1249 table = gp->softc; 1250 1251 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1252 if (entry->gpe_deleted || entry->gpe_internal) 1253 continue; 1254 if (entry->gpe_index == gpp->gpp_index) 1255 break; 1256 } 1257 if (entry == NULL) { 1258 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1259 return (ENOENT); 1260 } 1261 1262 error = G_PART_MODIFY(table, entry, gpp); 1263 if (error) { 1264 gctl_error(req, "%d", error); 1265 return (error); 1266 } 1267 1268 if (!entry->gpe_created) 1269 entry->gpe_modified = 1; 1270 1271 /* Provide feedback if so requested. */ 1272 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1273 sb = sbuf_new_auto(); 1274 G_PART_FULLNAME(table, entry, sb, gp->name); 1275 sbuf_cat(sb, " modified\n"); 1276 sbuf_finish(sb); 1277 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1278 sbuf_delete(sb); 1279 } 1280 return (0); 1281 } 1282 1283 static int 1284 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1285 { 1286 gctl_error(req, "%d verb 'move'", ENOSYS); 1287 return (ENOSYS); 1288 } 1289 1290 static int 1291 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1292 { 1293 struct g_part_table *table; 1294 struct g_geom *gp; 1295 struct sbuf *sb; 1296 int error, recovered; 1297 1298 gp = gpp->gpp_geom; 1299 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1300 g_topology_assert(); 1301 table = gp->softc; 1302 error = recovered = 0; 1303 1304 if (table->gpt_corrupt) { 1305 error = G_PART_RECOVER(table); 1306 if (error == 0) 1307 error = g_part_check_integrity(table, 1308 LIST_FIRST(&gp->consumer)); 1309 if (error) { 1310 gctl_error(req, "%d recovering '%s' failed", 1311 error, gp->name); 1312 return (error); 1313 } 1314 recovered = 1; 1315 } 1316 /* Provide feedback if so requested. */ 1317 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1318 sb = sbuf_new_auto(); 1319 if (recovered) 1320 sbuf_printf(sb, "%s recovered\n", gp->name); 1321 else 1322 sbuf_printf(sb, "%s recovering is not needed\n", 1323 gp->name); 1324 sbuf_finish(sb); 1325 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1326 sbuf_delete(sb); 1327 } 1328 return (0); 1329 } 1330 1331 static int 1332 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1333 { 1334 struct g_geom *gp; 1335 struct g_provider *pp; 1336 struct g_part_entry *pe, *entry; 1337 struct g_part_table *table; 1338 struct sbuf *sb; 1339 quad_t end; 1340 int error; 1341 off_t mediasize; 1342 1343 gp = gpp->gpp_geom; 1344 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1345 g_topology_assert(); 1346 table = gp->softc; 1347 1348 /* check gpp_index */ 1349 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1350 if (entry->gpe_deleted || entry->gpe_internal) 1351 continue; 1352 if (entry->gpe_index == gpp->gpp_index) 1353 break; 1354 } 1355 if (entry == NULL) { 1356 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1357 return (ENOENT); 1358 } 1359 1360 /* check gpp_size */ 1361 end = entry->gpe_start + gpp->gpp_size - 1; 1362 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1363 gctl_error(req, "%d size '%jd'", EINVAL, 1364 (intmax_t)gpp->gpp_size); 1365 return (EINVAL); 1366 } 1367 1368 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1369 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1370 continue; 1371 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1372 gctl_error(req, "%d end '%jd'", ENOSPC, 1373 (intmax_t)end); 1374 return (ENOSPC); 1375 } 1376 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1377 gctl_error(req, "%d size '%jd'", ENOSPC, 1378 (intmax_t)gpp->gpp_size); 1379 return (ENOSPC); 1380 } 1381 } 1382 1383 pp = entry->gpe_pp; 1384 if ((g_debugflags & G_F_FOOTSHOOTING) == 0 && 1385 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1386 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) { 1387 /* Deny shrinking of an opened partition. */ 1388 gctl_error(req, "%d", EBUSY); 1389 return (EBUSY); 1390 } 1391 } 1392 1393 error = G_PART_RESIZE(table, entry, gpp); 1394 if (error) { 1395 gctl_error(req, "%d%s", error, error != EBUSY ? "": 1396 " resizing will lead to unexpected shrinking" 1397 " due to alignment"); 1398 return (error); 1399 } 1400 1401 if (!entry->gpe_created) 1402 entry->gpe_modified = 1; 1403 1404 /* update mediasize of changed provider */ 1405 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1406 pp->sectorsize; 1407 g_resize_provider(pp, mediasize); 1408 1409 /* Provide feedback if so requested. */ 1410 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1411 sb = sbuf_new_auto(); 1412 G_PART_FULLNAME(table, entry, sb, gp->name); 1413 sbuf_cat(sb, " resized\n"); 1414 sbuf_finish(sb); 1415 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1416 sbuf_delete(sb); 1417 } 1418 return (0); 1419 } 1420 1421 static int 1422 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1423 unsigned int set) 1424 { 1425 struct g_geom *gp; 1426 struct g_part_entry *entry; 1427 struct g_part_table *table; 1428 struct sbuf *sb; 1429 int error; 1430 1431 gp = gpp->gpp_geom; 1432 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1433 g_topology_assert(); 1434 1435 table = gp->softc; 1436 1437 if (gpp->gpp_parms & G_PART_PARM_INDEX) { 1438 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1439 if (entry->gpe_deleted || entry->gpe_internal) 1440 continue; 1441 if (entry->gpe_index == gpp->gpp_index) 1442 break; 1443 } 1444 if (entry == NULL) { 1445 gctl_error(req, "%d index '%d'", ENOENT, 1446 gpp->gpp_index); 1447 return (ENOENT); 1448 } 1449 } else 1450 entry = NULL; 1451 1452 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1453 if (error) { 1454 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1455 return (error); 1456 } 1457 1458 /* Provide feedback if so requested. */ 1459 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1460 sb = sbuf_new_auto(); 1461 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1462 (set) ? "" : "un"); 1463 if (entry) 1464 G_PART_FULLNAME(table, entry, sb, gp->name); 1465 else 1466 sbuf_cat(sb, gp->name); 1467 sbuf_cat(sb, "\n"); 1468 sbuf_finish(sb); 1469 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1470 sbuf_delete(sb); 1471 } 1472 return (0); 1473 } 1474 1475 static int 1476 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1477 { 1478 struct g_consumer *cp; 1479 struct g_provider *pp; 1480 struct g_geom *gp; 1481 struct g_part_entry *entry, *tmp; 1482 struct g_part_table *table; 1483 int error, reprobe; 1484 1485 gp = gpp->gpp_geom; 1486 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1487 g_topology_assert(); 1488 1489 table = gp->softc; 1490 if (!table->gpt_opened) { 1491 gctl_error(req, "%d", EPERM); 1492 return (EPERM); 1493 } 1494 1495 cp = LIST_FIRST(&gp->consumer); 1496 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1497 entry->gpe_modified = 0; 1498 if (entry->gpe_created) { 1499 pp = entry->gpe_pp; 1500 if (pp != NULL) { 1501 pp->private = NULL; 1502 entry->gpe_pp = NULL; 1503 g_wither_provider(pp, ENXIO); 1504 } 1505 entry->gpe_deleted = 1; 1506 } 1507 if (entry->gpe_deleted) { 1508 LIST_REMOVE(entry, gpe_entry); 1509 g_free(entry); 1510 } 1511 } 1512 1513 g_topology_unlock(); 1514 1515 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1516 table->gpt_created) ? 1 : 0; 1517 1518 if (reprobe) { 1519 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1520 if (entry->gpe_internal) 1521 continue; 1522 error = EBUSY; 1523 goto fail; 1524 } 1525 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1526 LIST_REMOVE(entry, gpe_entry); 1527 g_free(entry); 1528 } 1529 error = g_part_probe(gp, cp, table->gpt_depth); 1530 if (error) { 1531 g_topology_lock(); 1532 g_access(cp, -1, -1, -1); 1533 g_part_wither(gp, error); 1534 return (0); 1535 } 1536 table = gp->softc; 1537 1538 /* 1539 * Synthesize a disk geometry. Some partitioning schemes 1540 * depend on it and since some file systems need it even 1541 * when the partitition scheme doesn't, we do it here in 1542 * scheme-independent code. 1543 */ 1544 pp = cp->provider; 1545 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1546 } 1547 1548 error = G_PART_READ(table, cp); 1549 if (error) 1550 goto fail; 1551 error = g_part_check_integrity(table, cp); 1552 if (error) 1553 goto fail; 1554 1555 g_topology_lock(); 1556 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1557 if (!entry->gpe_internal) 1558 g_part_new_provider(gp, table, entry); 1559 } 1560 1561 table->gpt_opened = 0; 1562 g_access(cp, -1, -1, -1); 1563 return (0); 1564 1565 fail: 1566 g_topology_lock(); 1567 gctl_error(req, "%d", error); 1568 return (error); 1569 } 1570 1571 static void 1572 g_part_wither(struct g_geom *gp, int error) 1573 { 1574 struct g_part_entry *entry; 1575 struct g_part_table *table; 1576 struct g_provider *pp; 1577 1578 table = gp->softc; 1579 if (table != NULL) { 1580 gp->softc = NULL; 1581 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1582 LIST_REMOVE(entry, gpe_entry); 1583 pp = entry->gpe_pp; 1584 entry->gpe_pp = NULL; 1585 if (pp != NULL) { 1586 pp->private = NULL; 1587 g_wither_provider(pp, error); 1588 } 1589 g_free(entry); 1590 } 1591 G_PART_DESTROY(table, NULL); 1592 kobj_delete((kobj_t)table, M_GEOM); 1593 } 1594 g_wither_geom(gp, error); 1595 } 1596 1597 /* 1598 * Class methods. 1599 */ 1600 1601 static void 1602 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1603 { 1604 struct g_part_parms gpp; 1605 struct g_part_table *table; 1606 struct gctl_req_arg *ap; 1607 enum g_part_ctl ctlreq; 1608 unsigned int i, mparms, oparms, parm; 1609 int auto_commit, close_on_error; 1610 int error, modifies; 1611 1612 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1613 g_topology_assert(); 1614 1615 ctlreq = G_PART_CTL_NONE; 1616 modifies = 1; 1617 mparms = 0; 1618 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1619 switch (*verb) { 1620 case 'a': 1621 if (!strcmp(verb, "add")) { 1622 ctlreq = G_PART_CTL_ADD; 1623 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1624 G_PART_PARM_START | G_PART_PARM_TYPE; 1625 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1626 } 1627 break; 1628 case 'b': 1629 if (!strcmp(verb, "bootcode")) { 1630 ctlreq = G_PART_CTL_BOOTCODE; 1631 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1632 oparms |= G_PART_PARM_SKIP_DSN; 1633 } 1634 break; 1635 case 'c': 1636 if (!strcmp(verb, "commit")) { 1637 ctlreq = G_PART_CTL_COMMIT; 1638 mparms |= G_PART_PARM_GEOM; 1639 modifies = 0; 1640 } else if (!strcmp(verb, "create")) { 1641 ctlreq = G_PART_CTL_CREATE; 1642 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1643 oparms |= G_PART_PARM_ENTRIES; 1644 } 1645 break; 1646 case 'd': 1647 if (!strcmp(verb, "delete")) { 1648 ctlreq = G_PART_CTL_DELETE; 1649 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1650 } else if (!strcmp(verb, "destroy")) { 1651 ctlreq = G_PART_CTL_DESTROY; 1652 mparms |= G_PART_PARM_GEOM; 1653 oparms |= G_PART_PARM_FORCE; 1654 } 1655 break; 1656 case 'm': 1657 if (!strcmp(verb, "modify")) { 1658 ctlreq = G_PART_CTL_MODIFY; 1659 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1660 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1661 } else if (!strcmp(verb, "move")) { 1662 ctlreq = G_PART_CTL_MOVE; 1663 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1664 } 1665 break; 1666 case 'r': 1667 if (!strcmp(verb, "recover")) { 1668 ctlreq = G_PART_CTL_RECOVER; 1669 mparms |= G_PART_PARM_GEOM; 1670 } else if (!strcmp(verb, "resize")) { 1671 ctlreq = G_PART_CTL_RESIZE; 1672 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1673 G_PART_PARM_SIZE; 1674 } 1675 break; 1676 case 's': 1677 if (!strcmp(verb, "set")) { 1678 ctlreq = G_PART_CTL_SET; 1679 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1680 oparms |= G_PART_PARM_INDEX; 1681 } 1682 break; 1683 case 'u': 1684 if (!strcmp(verb, "undo")) { 1685 ctlreq = G_PART_CTL_UNDO; 1686 mparms |= G_PART_PARM_GEOM; 1687 modifies = 0; 1688 } else if (!strcmp(verb, "unset")) { 1689 ctlreq = G_PART_CTL_UNSET; 1690 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1691 oparms |= G_PART_PARM_INDEX; 1692 } 1693 break; 1694 } 1695 if (ctlreq == G_PART_CTL_NONE) { 1696 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1697 return; 1698 } 1699 1700 bzero(&gpp, sizeof(gpp)); 1701 for (i = 0; i < req->narg; i++) { 1702 ap = &req->arg[i]; 1703 parm = 0; 1704 switch (ap->name[0]) { 1705 case 'a': 1706 if (!strcmp(ap->name, "arg0")) { 1707 parm = mparms & 1708 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1709 } 1710 if (!strcmp(ap->name, "attrib")) 1711 parm = G_PART_PARM_ATTRIB; 1712 break; 1713 case 'b': 1714 if (!strcmp(ap->name, "bootcode")) 1715 parm = G_PART_PARM_BOOTCODE; 1716 break; 1717 case 'c': 1718 if (!strcmp(ap->name, "class")) 1719 continue; 1720 break; 1721 case 'e': 1722 if (!strcmp(ap->name, "entries")) 1723 parm = G_PART_PARM_ENTRIES; 1724 break; 1725 case 'f': 1726 if (!strcmp(ap->name, "flags")) 1727 parm = G_PART_PARM_FLAGS; 1728 else if (!strcmp(ap->name, "force")) 1729 parm = G_PART_PARM_FORCE; 1730 break; 1731 case 'i': 1732 if (!strcmp(ap->name, "index")) 1733 parm = G_PART_PARM_INDEX; 1734 break; 1735 case 'l': 1736 if (!strcmp(ap->name, "label")) 1737 parm = G_PART_PARM_LABEL; 1738 break; 1739 case 'o': 1740 if (!strcmp(ap->name, "output")) 1741 parm = G_PART_PARM_OUTPUT; 1742 break; 1743 case 's': 1744 if (!strcmp(ap->name, "scheme")) 1745 parm = G_PART_PARM_SCHEME; 1746 else if (!strcmp(ap->name, "size")) 1747 parm = G_PART_PARM_SIZE; 1748 else if (!strcmp(ap->name, "start")) 1749 parm = G_PART_PARM_START; 1750 else if (!strcmp(ap->name, "skip_dsn")) 1751 parm = G_PART_PARM_SKIP_DSN; 1752 break; 1753 case 't': 1754 if (!strcmp(ap->name, "type")) 1755 parm = G_PART_PARM_TYPE; 1756 break; 1757 case 'v': 1758 if (!strcmp(ap->name, "verb")) 1759 continue; 1760 else if (!strcmp(ap->name, "version")) 1761 parm = G_PART_PARM_VERSION; 1762 break; 1763 } 1764 if ((parm & (mparms | oparms)) == 0) { 1765 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1766 return; 1767 } 1768 switch (parm) { 1769 case G_PART_PARM_ATTRIB: 1770 error = g_part_parm_str(req, ap->name, 1771 &gpp.gpp_attrib); 1772 break; 1773 case G_PART_PARM_BOOTCODE: 1774 error = g_part_parm_bootcode(req, ap->name, 1775 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1776 break; 1777 case G_PART_PARM_ENTRIES: 1778 error = g_part_parm_intmax(req, ap->name, 1779 &gpp.gpp_entries); 1780 break; 1781 case G_PART_PARM_FLAGS: 1782 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1783 break; 1784 case G_PART_PARM_FORCE: 1785 error = g_part_parm_uint32(req, ap->name, 1786 &gpp.gpp_force); 1787 break; 1788 case G_PART_PARM_GEOM: 1789 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1790 break; 1791 case G_PART_PARM_INDEX: 1792 error = g_part_parm_intmax(req, ap->name, 1793 &gpp.gpp_index); 1794 break; 1795 case G_PART_PARM_LABEL: 1796 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1797 break; 1798 case G_PART_PARM_OUTPUT: 1799 error = 0; /* Write-only parameter */ 1800 break; 1801 case G_PART_PARM_PROVIDER: 1802 error = g_part_parm_provider(req, ap->name, 1803 &gpp.gpp_provider); 1804 break; 1805 case G_PART_PARM_SCHEME: 1806 error = g_part_parm_scheme(req, ap->name, 1807 &gpp.gpp_scheme); 1808 break; 1809 case G_PART_PARM_SIZE: 1810 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1811 break; 1812 case G_PART_PARM_SKIP_DSN: 1813 error = g_part_parm_uint32(req, ap->name, 1814 &gpp.gpp_skip_dsn); 1815 break; 1816 case G_PART_PARM_START: 1817 error = g_part_parm_quad(req, ap->name, 1818 &gpp.gpp_start); 1819 break; 1820 case G_PART_PARM_TYPE: 1821 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1822 break; 1823 case G_PART_PARM_VERSION: 1824 error = g_part_parm_uint32(req, ap->name, 1825 &gpp.gpp_version); 1826 break; 1827 default: 1828 error = EDOOFUS; 1829 gctl_error(req, "%d %s", error, ap->name); 1830 break; 1831 } 1832 if (error != 0) { 1833 if (error == ENOATTR) { 1834 gctl_error(req, "%d param '%s'", error, 1835 ap->name); 1836 } 1837 return; 1838 } 1839 gpp.gpp_parms |= parm; 1840 } 1841 if ((gpp.gpp_parms & mparms) != mparms) { 1842 parm = mparms - (gpp.gpp_parms & mparms); 1843 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1844 return; 1845 } 1846 1847 /* Obtain permissions if possible/necessary. */ 1848 close_on_error = 0; 1849 table = NULL; 1850 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1851 table = gpp.gpp_geom->softc; 1852 if (table != NULL && table->gpt_corrupt && 1853 ctlreq != G_PART_CTL_DESTROY && 1854 ctlreq != G_PART_CTL_RECOVER && 1855 geom_part_check_integrity) { 1856 gctl_error(req, "%d table '%s' is corrupt", 1857 EPERM, gpp.gpp_geom->name); 1858 return; 1859 } 1860 if (table != NULL && !table->gpt_opened) { 1861 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1862 1, 1, 1); 1863 if (error) { 1864 gctl_error(req, "%d geom '%s'", error, 1865 gpp.gpp_geom->name); 1866 return; 1867 } 1868 table->gpt_opened = 1; 1869 close_on_error = 1; 1870 } 1871 } 1872 1873 /* Allow the scheme to check or modify the parameters. */ 1874 if (table != NULL) { 1875 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1876 if (error) { 1877 gctl_error(req, "%d pre-check failed", error); 1878 goto out; 1879 } 1880 } else 1881 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1882 1883 switch (ctlreq) { 1884 case G_PART_CTL_NONE: 1885 panic("%s", __func__); 1886 case G_PART_CTL_ADD: 1887 error = g_part_ctl_add(req, &gpp); 1888 break; 1889 case G_PART_CTL_BOOTCODE: 1890 error = g_part_ctl_bootcode(req, &gpp); 1891 break; 1892 case G_PART_CTL_COMMIT: 1893 error = g_part_ctl_commit(req, &gpp); 1894 break; 1895 case G_PART_CTL_CREATE: 1896 error = g_part_ctl_create(req, &gpp); 1897 break; 1898 case G_PART_CTL_DELETE: 1899 error = g_part_ctl_delete(req, &gpp); 1900 break; 1901 case G_PART_CTL_DESTROY: 1902 error = g_part_ctl_destroy(req, &gpp); 1903 break; 1904 case G_PART_CTL_MODIFY: 1905 error = g_part_ctl_modify(req, &gpp); 1906 break; 1907 case G_PART_CTL_MOVE: 1908 error = g_part_ctl_move(req, &gpp); 1909 break; 1910 case G_PART_CTL_RECOVER: 1911 error = g_part_ctl_recover(req, &gpp); 1912 break; 1913 case G_PART_CTL_RESIZE: 1914 error = g_part_ctl_resize(req, &gpp); 1915 break; 1916 case G_PART_CTL_SET: 1917 error = g_part_ctl_setunset(req, &gpp, 1); 1918 break; 1919 case G_PART_CTL_UNDO: 1920 error = g_part_ctl_undo(req, &gpp); 1921 break; 1922 case G_PART_CTL_UNSET: 1923 error = g_part_ctl_setunset(req, &gpp, 0); 1924 break; 1925 } 1926 1927 /* Implement automatic commit. */ 1928 if (!error) { 1929 auto_commit = (modifies && 1930 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1931 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1932 if (auto_commit) { 1933 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1934 __func__)); 1935 error = g_part_ctl_commit(req, &gpp); 1936 } 1937 } 1938 1939 out: 1940 if (error && close_on_error) { 1941 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1942 table->gpt_opened = 0; 1943 } 1944 } 1945 1946 static int 1947 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1948 struct g_geom *gp) 1949 { 1950 1951 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1952 g_topology_assert(); 1953 1954 g_part_wither(gp, EINVAL); 1955 return (0); 1956 } 1957 1958 static struct g_geom * 1959 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1960 { 1961 struct g_consumer *cp; 1962 struct g_geom *gp; 1963 struct g_part_entry *entry; 1964 struct g_part_table *table; 1965 struct root_hold_token *rht; 1966 int attr, depth; 1967 int error; 1968 1969 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1970 g_topology_assert(); 1971 1972 /* Skip providers that are already open for writing. */ 1973 if (pp->acw > 0) 1974 return (NULL); 1975 1976 /* 1977 * Create a GEOM with consumer and hook it up to the provider. 1978 * With that we become part of the topology. Obtain read access 1979 * to the provider. 1980 */ 1981 gp = g_new_geomf(mp, "%s", pp->name); 1982 cp = g_new_consumer(gp); 1983 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1984 error = g_attach(cp, pp); 1985 if (error == 0) 1986 error = g_access(cp, 1, 0, 0); 1987 if (error != 0) { 1988 if (cp->provider) 1989 g_detach(cp); 1990 g_destroy_consumer(cp); 1991 g_destroy_geom(gp); 1992 return (NULL); 1993 } 1994 1995 rht = root_mount_hold(mp->name); 1996 g_topology_unlock(); 1997 1998 /* 1999 * Short-circuit the whole probing galore when there's no 2000 * media present. 2001 */ 2002 if (pp->mediasize == 0 || pp->sectorsize == 0) { 2003 error = ENODEV; 2004 goto fail; 2005 } 2006 2007 /* Make sure we can nest and if so, determine our depth. */ 2008 error = g_getattr("PART::isleaf", cp, &attr); 2009 if (!error && attr) { 2010 error = ENODEV; 2011 goto fail; 2012 } 2013 error = g_getattr("PART::depth", cp, &attr); 2014 depth = (!error) ? attr + 1 : 0; 2015 2016 error = g_part_probe(gp, cp, depth); 2017 if (error) 2018 goto fail; 2019 2020 table = gp->softc; 2021 2022 /* 2023 * Synthesize a disk geometry. Some partitioning schemes 2024 * depend on it and since some file systems need it even 2025 * when the partitition scheme doesn't, we do it here in 2026 * scheme-independent code. 2027 */ 2028 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 2029 2030 error = G_PART_READ(table, cp); 2031 if (error) 2032 goto fail; 2033 error = g_part_check_integrity(table, cp); 2034 if (error) 2035 goto fail; 2036 2037 g_topology_lock(); 2038 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 2039 if (!entry->gpe_internal) 2040 g_part_new_provider(gp, table, entry); 2041 } 2042 2043 root_mount_rel(rht); 2044 g_access(cp, -1, 0, 0); 2045 return (gp); 2046 2047 fail: 2048 g_topology_lock(); 2049 root_mount_rel(rht); 2050 g_access(cp, -1, 0, 0); 2051 g_detach(cp); 2052 g_destroy_consumer(cp); 2053 g_destroy_geom(gp); 2054 return (NULL); 2055 } 2056 2057 /* 2058 * Geom methods. 2059 */ 2060 2061 static int 2062 g_part_access(struct g_provider *pp, int dr, int dw, int de) 2063 { 2064 struct g_consumer *cp; 2065 2066 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 2067 dw, de)); 2068 2069 cp = LIST_FIRST(&pp->geom->consumer); 2070 2071 /* We always gain write-exclusive access. */ 2072 return (g_access(cp, dr, dw, dw + de)); 2073 } 2074 2075 static void 2076 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 2077 struct g_consumer *cp, struct g_provider *pp) 2078 { 2079 char buf[64]; 2080 struct g_part_entry *entry; 2081 struct g_part_table *table; 2082 2083 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 2084 table = gp->softc; 2085 2086 if (indent == NULL) { 2087 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 2088 entry = pp->private; 2089 if (entry == NULL) 2090 return; 2091 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 2092 (uintmax_t)entry->gpe_offset, 2093 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2094 /* 2095 * libdisk compatibility quirk - the scheme dumps the 2096 * slicer name and partition type in a way that is 2097 * compatible with libdisk. When libdisk is not used 2098 * anymore, this should go away. 2099 */ 2100 G_PART_DUMPCONF(table, entry, sb, indent); 2101 } else if (cp != NULL) { /* Consumer configuration. */ 2102 KASSERT(pp == NULL, ("%s", __func__)); 2103 /* none */ 2104 } else if (pp != NULL) { /* Provider configuration. */ 2105 entry = pp->private; 2106 if (entry == NULL) 2107 return; 2108 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2109 (uintmax_t)entry->gpe_start); 2110 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2111 (uintmax_t)entry->gpe_end); 2112 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2113 entry->gpe_index); 2114 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2115 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2116 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2117 (uintmax_t)entry->gpe_offset); 2118 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2119 (uintmax_t)pp->mediasize); 2120 G_PART_DUMPCONF(table, entry, sb, indent); 2121 } else { /* Geom configuration. */ 2122 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2123 table->gpt_scheme->name); 2124 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2125 table->gpt_entries); 2126 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2127 (uintmax_t)table->gpt_first); 2128 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2129 (uintmax_t)table->gpt_last); 2130 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2131 table->gpt_sectors); 2132 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2133 table->gpt_heads); 2134 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2135 table->gpt_corrupt ? "CORRUPT": "OK"); 2136 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2137 table->gpt_opened ? "true": "false"); 2138 G_PART_DUMPCONF(table, NULL, sb, indent); 2139 } 2140 } 2141 2142 /*- 2143 * This start routine is only called for non-trivial requests, all the 2144 * trivial ones are handled autonomously by the slice code. 2145 * For requests we handle here, we must call the g_io_deliver() on the 2146 * bio, and return non-zero to indicate to the slice code that we did so. 2147 * This code executes in the "DOWN" I/O path, this means: 2148 * * No sleeping. 2149 * * Don't grab the topology lock. 2150 * * Don't call biowait, g_getattr(), g_setattr() or g_read_data() 2151 */ 2152 static int 2153 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td) 2154 { 2155 struct g_part_table *table; 2156 2157 table = pp->geom->softc; 2158 return G_PART_IOCTL(table, pp, cmd, data, fflag, td); 2159 } 2160 2161 static void 2162 g_part_resize(struct g_consumer *cp) 2163 { 2164 struct g_part_table *table; 2165 2166 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2167 g_topology_assert(); 2168 2169 if (auto_resize == 0) 2170 return; 2171 2172 table = cp->geom->softc; 2173 if (table->gpt_opened == 0) { 2174 if (g_access(cp, 1, 1, 1) != 0) 2175 return; 2176 table->gpt_opened = 1; 2177 } 2178 if (G_PART_RESIZE(table, NULL, NULL) == 0) 2179 printf("GEOM_PART: %s was automatically resized.\n" 2180 " Use `gpart commit %s` to save changes or " 2181 "`gpart undo %s` to revert them.\n", cp->geom->name, 2182 cp->geom->name, cp->geom->name); 2183 if (g_part_check_integrity(table, cp) != 0) { 2184 g_access(cp, -1, -1, -1); 2185 table->gpt_opened = 0; 2186 g_part_wither(table->gpt_gp, ENXIO); 2187 } 2188 } 2189 2190 static void 2191 g_part_orphan(struct g_consumer *cp) 2192 { 2193 struct g_provider *pp; 2194 struct g_part_table *table; 2195 2196 pp = cp->provider; 2197 KASSERT(pp != NULL, ("%s", __func__)); 2198 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2199 g_topology_assert(); 2200 2201 KASSERT(pp->error != 0, ("%s", __func__)); 2202 table = cp->geom->softc; 2203 if (table != NULL && table->gpt_opened) 2204 g_access(cp, -1, -1, -1); 2205 g_part_wither(cp->geom, pp->error); 2206 } 2207 2208 static void 2209 g_part_spoiled(struct g_consumer *cp) 2210 { 2211 2212 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2213 g_topology_assert(); 2214 2215 cp->flags |= G_CF_ORPHAN; 2216 g_part_wither(cp->geom, ENXIO); 2217 } 2218 2219 static void 2220 g_part_start(struct bio *bp) 2221 { 2222 struct bio *bp2; 2223 struct g_consumer *cp; 2224 struct g_geom *gp; 2225 struct g_part_entry *entry; 2226 struct g_part_table *table; 2227 struct g_kerneldump *gkd; 2228 struct g_provider *pp; 2229 void (*done_func)(struct bio *) = g_std_done; 2230 char buf[64]; 2231 2232 biotrack(bp, __func__); 2233 2234 pp = bp->bio_to; 2235 gp = pp->geom; 2236 table = gp->softc; 2237 cp = LIST_FIRST(&gp->consumer); 2238 2239 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2240 pp->name)); 2241 2242 entry = pp->private; 2243 if (entry == NULL) { 2244 g_io_deliver(bp, ENXIO); 2245 return; 2246 } 2247 2248 switch(bp->bio_cmd) { 2249 case BIO_DELETE: 2250 case BIO_READ: 2251 case BIO_WRITE: 2252 if (bp->bio_offset >= pp->mediasize) { 2253 g_io_deliver(bp, EIO); 2254 return; 2255 } 2256 bp2 = g_clone_bio(bp); 2257 if (bp2 == NULL) { 2258 g_io_deliver(bp, ENOMEM); 2259 return; 2260 } 2261 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2262 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2263 bp2->bio_done = g_std_done; 2264 bp2->bio_offset += entry->gpe_offset; 2265 g_io_request(bp2, cp); 2266 return; 2267 case BIO_SPEEDUP: 2268 case BIO_FLUSH: 2269 break; 2270 case BIO_GETATTR: 2271 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2272 return; 2273 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2274 return; 2275 /* 2276 * allow_nesting overrides "isleaf" to false _unless_ the 2277 * provider offset is zero, since otherwise we would recurse. 2278 */ 2279 if (g_handleattr_int(bp, "PART::isleaf", 2280 table->gpt_isleaf && 2281 (allow_nesting == 0 || entry->gpe_offset == 0))) 2282 return; 2283 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2284 return; 2285 if (g_handleattr_str(bp, "PART::scheme", 2286 table->gpt_scheme->name)) 2287 return; 2288 if (g_handleattr_str(bp, "PART::type", 2289 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2290 return; 2291 if (!strcmp("GEOM::physpath", bp->bio_attribute)) { 2292 done_func = g_part_get_physpath_done; 2293 break; 2294 } 2295 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2296 /* 2297 * Check that the partition is suitable for kernel 2298 * dumps. Typically only swap partitions should be 2299 * used. If the request comes from the nested scheme 2300 * we allow dumping there as well. 2301 */ 2302 if ((bp->bio_from == NULL || 2303 bp->bio_from->geom->class != &g_part_class) && 2304 G_PART_DUMPTO(table, entry) == 0) { 2305 g_io_deliver(bp, ENODEV); 2306 printf("GEOM_PART: Partition '%s' not suitable" 2307 " for kernel dumps (wrong type?)\n", 2308 pp->name); 2309 return; 2310 } 2311 gkd = (struct g_kerneldump *)bp->bio_data; 2312 if (gkd->offset >= pp->mediasize) { 2313 g_io_deliver(bp, EIO); 2314 return; 2315 } 2316 if (gkd->offset + gkd->length > pp->mediasize) 2317 gkd->length = pp->mediasize - gkd->offset; 2318 gkd->offset += entry->gpe_offset; 2319 } 2320 break; 2321 default: 2322 g_io_deliver(bp, EOPNOTSUPP); 2323 return; 2324 } 2325 2326 bp2 = g_clone_bio(bp); 2327 if (bp2 == NULL) { 2328 g_io_deliver(bp, ENOMEM); 2329 return; 2330 } 2331 bp2->bio_done = done_func; 2332 g_io_request(bp2, cp); 2333 } 2334 2335 static void 2336 g_part_init(struct g_class *mp) 2337 { 2338 2339 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2340 } 2341 2342 static void 2343 g_part_fini(struct g_class *mp) 2344 { 2345 2346 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2347 } 2348 2349 static void 2350 g_part_unload_event(void *arg, int flag) 2351 { 2352 struct g_consumer *cp; 2353 struct g_geom *gp; 2354 struct g_provider *pp; 2355 struct g_part_scheme *scheme; 2356 struct g_part_table *table; 2357 uintptr_t *xchg; 2358 int acc, error; 2359 2360 if (flag == EV_CANCEL) 2361 return; 2362 2363 xchg = arg; 2364 error = 0; 2365 scheme = (void *)(*xchg); 2366 2367 g_topology_assert(); 2368 2369 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2370 table = gp->softc; 2371 if (table->gpt_scheme != scheme) 2372 continue; 2373 2374 acc = 0; 2375 LIST_FOREACH(pp, &gp->provider, provider) 2376 acc += pp->acr + pp->acw + pp->ace; 2377 LIST_FOREACH(cp, &gp->consumer, consumer) 2378 acc += cp->acr + cp->acw + cp->ace; 2379 2380 if (!acc) 2381 g_part_wither(gp, ENOSYS); 2382 else 2383 error = EBUSY; 2384 } 2385 2386 if (!error) 2387 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2388 2389 *xchg = error; 2390 } 2391 2392 int 2393 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2394 { 2395 struct g_part_scheme *iter; 2396 uintptr_t arg; 2397 int error; 2398 2399 error = 0; 2400 switch (type) { 2401 case MOD_LOAD: 2402 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2403 if (scheme == iter) { 2404 printf("GEOM_PART: scheme %s is already " 2405 "registered!\n", scheme->name); 2406 break; 2407 } 2408 } 2409 if (iter == NULL) { 2410 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2411 scheme_list); 2412 g_retaste(&g_part_class); 2413 } 2414 break; 2415 case MOD_UNLOAD: 2416 arg = (uintptr_t)scheme; 2417 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2418 NULL); 2419 if (error == 0) 2420 error = arg; 2421 break; 2422 default: 2423 error = EOPNOTSUPP; 2424 break; 2425 } 2426 2427 return (error); 2428 } 2429