/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
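/*
 * Map the partition type aliases accepted from userland (e.g. the
 * argument to "gpart add -t freebsd-ufs") to their G_PART_ALIAS_*
 * constants.  Each scheme translates an alias to its native on-disk
 * type.  The entries are kept in alphabetical order.
 */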
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-apfs", G_PART_ALIAS_APPLE_APFS },
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32", G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "fat32lba", G_PART_ALIAS_MS_FAT32LBA },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces", G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");
static u_int allow_nesting = 0;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting,
    CTLFLAG_RWTUN, &allow_nesting, 0,
    "Allow additional levels of nesting");
char g_part_separator[MAXPATHLEN] = "";
SYSCTL_STRING(_kern_geom_part, OID_AUTO, separator,
    CTLFLAG_RDTUN, &g_part_separator, sizeof(g_part_separator),
    "Partition name separator");
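/*
 * The knobs above are exposed as kern.geom.part.* sysctls.  The
 * CTLFLAG_RWTUN ones can be changed at runtime with sysctl(8) or set
 * as loader tunables, e.g. in loader.conf:
 *
 *	kern.geom.part.check_integrity="0"
 *
 * The separator is CTLFLAG_RDTUN and can only be set as a tunable.
 */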
/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}
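/*
 * Worked example of the CHS synthesis below: a 1 GiB disk has 2097152
 * 512-byte blocks.  With 63 sectors per track the search settles on
 * 64 heads, since 2097152 / 64 / 63 = 520 cylinders stays within the
 * 1023 limit and covers 520 * 64 * 63 = 2096640 blocks, more than any
 * other candidate combination.  A disk larger than 1023 * 255 * 63
 * blocks matches no candidate at all, and g_part_geometry() falls
 * back to the 255 heads / 63 sectors maximum.
 */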
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

static void
g_part_get_physpath_done(struct bio *bp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;
	struct bio *pbp;

	pbp = bp->bio_parent;
	pp = pbp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	entry = pp->private;

	if (bp->bio_error == 0) {
		char *end;
		size_t len, remainder;

		len = strlcat(bp->bio_data, "/", bp->bio_length);
		if (len < bp->bio_length) {
			end = bp->bio_data + len;
			remainder = bp->bio_length - len;
			G_PART_NAME(table, entry, end, remainder);
		}
	}
	g_std_done(bp);
}
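/*
 * Print integrity-check diagnostics on the console when the kernel
 * was booted with bootverbose set.  Note that the macro expands to a
 * bare "if" statement, so it must be used as a complete statement;
 * g_part_check_integrity() below is the only consumer.
 */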
#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %ju bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    (uintmax_t)pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
#undef	DPRINTF

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
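/*
 * A worked example of the stripe-offset math below: on a disk with a
 * 4096-byte stripesize (and stripeoffset 0), 512-byte sectors and a
 * partition starting at LBA 63, gpe_offset is 63 * 512 = 32256 bytes,
 * so the new provider advertises stripeoffset 32256 % 4096 = 3584.
 * A non-zero provider stripeoffset is what the "not aligned"
 * diagnostics key off of.
 */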
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	struct g_geom_alias *gap;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/*
		 * Add aliases to the geom before we create the provider so
		 * that geom_dev can taste it with all the aliases in place
		 * so all the aliased dev_t instances get created for each
		 * partition (eg foo5p7 gets created for bar5p7 when foo is
		 * an alias of bar).
		 */
		LIST_FOREACH(gap, &table->gpt_gp->aliases, ga_next) {
			sb = sbuf_new_auto();
			G_PART_FULLNAME(table, entry, sb, gap->ga_alias);
			sbuf_finish(sb);
			g_geom_add_alias(gp, sbuf_data(sb));
			sbuf_delete(sb);
		}
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom *
g_part_find_geom(const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}

static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	const intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	const uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}
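/*
 * Probe all schemes and pick the best match.  G_PART_PROBE() returns
 * a priority: 0 means a certain match, negative values express lower
 * confidence (closer to zero is better) and a positive value means an
 * error.  A scheme that reports 0 short-circuits the search.
 */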
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */
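/*
 * Add a new partition entry.  When no index is given, the loop below
 * walks the index-sorted entry list and picks the first free slot:
 * with live entries at index 1, 2 and 4, for instance, the new entry
 * gets index 3.  A deleted entry with a matching index is recycled
 * rather than allocating a new one.
 */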
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %ju bytes\n", (uintmax_t)pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
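/*
 * Commit the in-memory table to disk.  gpt_smhead and gpt_smtail are
 * bitmaps of scratch sectors to clear first: bit N in gpt_smhead
 * means "zero sector N from the start of the media", bit N in
 * gpt_smtail means "zero the (N+1)-th sector from the end".  Schemes
 * use these to wipe the metadata of whatever was on the disk before.
 */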
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
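/*
 * Create a new partition table.  Destroying a table leaves a geom
 * behind whose softc uses g_part_null_scheme as a placeholder; if we
 * find such a geom here we take over its consumer, open state and
 * scratch-sector bitmaps instead of attaching from scratch, so that
 * destroy followed by create works within a single open transaction.
 */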
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{

	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
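/*
 * Resize a partition.  Growing is always allowed; shrinking a
 * partition whose provider is open is refused with EBUSY unless the
 * operator has set the G_F_FOOTSHOOTING debug flag.  The new end is
 * validated against the table limits and the neighbouring entries,
 * much as in g_part_ctl_add().
 */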
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & G_F_FOOTSHOOTING) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
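/*
 * Set or unset a scheme-defined attribute, either on a single entry
 * (when an index is given) or on the table as a whole.  Which
 * attribute names are valid is up to the scheme; examples are
 * "active" for MBR slices and "bootme" for GPT partitions.
 */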
static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	} else
		entry = NULL;

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;

	table = gp->softc;
	if (table != NULL) {
		gp->softc = NULL;
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			pp = entry->gpe_pp;
			entry->gpe_pp = NULL;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, error);
			}
			g_free(entry);
		}
		G_PART_DESTROY(table, NULL);
		kobj_delete((kobj_t)table, M_GEOM);
	}
	g_wither_geom(gp, error);
}
/*
 * Class methods.
 */

static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
			oparms |= G_PART_PARM_SKIP_DSN;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}
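	/*
	 * Each verb selected a mask of mandatory (mparms) and optional
	 * (oparms) parameters above; "add", for example, requires the
	 * geom, size, start and type parameters and optionally takes
	 * an index and a label.  Now walk the request arguments,
	 * convert every recognized parameter and reject anything not
	 * in either mask; missing mandatory parameters are caught
	 * after the loop.
	 */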
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			else if (!strcmp(ap->name, "skip_dsn"))
				parm = G_PART_PARM_SKIP_DSN;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_SKIP_DSN:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_skip_dsn);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}
	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

out:
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
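/*
 * Taste a new provider: attach read-only, probe all schemes, read the
 * winning table and publish a provider per entry.  A root mount hold
 * is taken around the probe so that a root file system living on a
 * partition is not mounted before its provider exists.  Providers
 * already open for writing are skipped, as their contents may change
 * under us.
 */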
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	struct g_geom_alias *gap;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	LIST_FOREACH(gap, &pp->geom->aliases, ga_next)
		g_geom_add_alias(gp, gap->ga_alias);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}

/*
 * Geom methods.
 */
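/*
 * Example of the access translation below: opening a partition
 * read-write, i.e. (dr, dw, de) = (0, 1, 0), opens the consumer on
 * the underlying disk as (0, 1, 1).  While any partition is open for
 * writing, the disk itself is therefore held with an exclusive count,
 * keeping other consumers from writing to it directly underneath us.
 */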
/*
 * Geom methods.
 */

static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
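/*
 * For illustration, the provider branch above contributes a fragment
 * like the following to the XML available through the
 * kern.geom.confxml sysctl (values invented for a 512MB partition at
 * LBA 2048 on a 512-byte-sector disk):
 *
 *	<start>2048</start>
 *	<end>1050623</end>
 *	<index>1</index>
 *	<type>freebsd-ufs</type>
 *	<offset>1048576</offset>
 *	<length>536870912</length>
 */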
/*-
 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait(), g_getattr(), g_setattr() or g_read_data().
 */
static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return (G_PART_IOCTL(table, pp, cmd, data, fflag, td));
}

static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    "  Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}

static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	void (*done_func)(struct bio *) = g_std_done;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Remap the partition-relative offset to the parent. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_SPEEDUP:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
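		/*
		 * The PART::* attributes below exist for introspection;
		 * in particular, g_part_taste() issues g_getattr() calls
		 * for PART::isleaf and PART::depth against the parent
		 * provider when deciding whether and how deep to nest.
		 */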
		/*
		 * allow_nesting overrides "isleaf" to false _unless_ the
		 * provider offset is zero, since otherwise we would recurse.
		 */
		if (g_handleattr_int(bp, "PART::isleaf",
		    table->gpt_isleaf &&
		    (allow_nesting == 0 || entry->gpe_offset == 0)))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::physpath", bp->bio_attribute)) {
			done_func = g_part_get_physpath_done;
			break;
		}
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = done_func;
	g_io_request(bp2, cp);
}

static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}

int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
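		/*
		 * Unloading must run in the GEOM event thread, where the
		 * topology lock is held: hand the scheme pointer to
		 * g_part_unload_event() through 'arg', which it
		 * overwrites with the result (0 or EBUSY) on completion.
		 */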
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		if (error == 0)
			error = arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
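/*
 * A scheme module reaches g_part_modevent() through the
 * G_PART_SCHEME_DECLARE() macro in g_part.h. A minimal sketch of a
 * registration, using a hypothetical "foo" scheme and only the
 * leading fields of struct g_part_scheme (compare g_part_null_scheme
 * at the top of this file):
 *
 *	static struct g_part_scheme g_part_foo_scheme = {
 *		"FOO",
 *		g_part_foo_methods,
 *		sizeof(struct g_part_foo_table),
 *	};
 *	G_PART_SCHEME_DECLARE(g_part_foo);
 *
 * MOD_LOAD then inserts the scheme on g_part_schemes and re-tastes
 * the class, while MOD_UNLOAD goes through g_part_unload_event()
 * above and fails with EBUSY while any table of that scheme is open.
 */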