/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
        { 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
        "(none)",
        g_part_null_methods,
        sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

struct g_part_alias_list {
        const char *lexeme;
        enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
        { "apple-apfs", G_PART_ALIAS_APPLE_APFS },
        { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
        { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
        { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
        { "apple-label", G_PART_ALIAS_APPLE_LABEL },
        { "apple-raid", G_PART_ALIAS_APPLE_RAID },
        { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
        { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
        { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
        { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
        { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
        { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
        { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
        { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
        { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
        { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
        { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
        { "dragonfly-label32", G_PART_ALIAS_DFBSD },
        { "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
        { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
        { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
        { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
        { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
        { "ebr", G_PART_ALIAS_EBR },
        { "efi", G_PART_ALIAS_EFI },
        { "fat16", G_PART_ALIAS_MS_FAT16 },
        { "fat32", G_PART_ALIAS_MS_FAT32 },
        { "fat32lba", G_PART_ALIAS_MS_FAT32LBA },
        { "freebsd", G_PART_ALIAS_FREEBSD },
        { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
        { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
        { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
        { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
        { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
        { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
        { "linux-data", G_PART_ALIAS_LINUX_DATA },
        { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
        { "linux-raid", G_PART_ALIAS_LINUX_RAID },
        { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
        { "mbr", G_PART_ALIAS_MBR },
        { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
        { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
        { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
        { "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
        { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
        { "ms-spaces", G_PART_ALIAS_MS_SPACES },
        { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
        { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
        { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
        { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
        { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
        { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
        { "ntfs", G_PART_ALIAS_MS_NTFS },
        { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
        { "prep-boot", G_PART_ALIAS_PREP_BOOT },
        { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
        { "vmware-vmfs", G_PART_ALIAS_VMFS },
        { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
        { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");
static u_int allow_nesting = 0;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting,
    CTLFLAG_RWTUN, &allow_nesting, 0,
    "Allow additional levels of nesting");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
        .name = "PART",
        .version = G_VERSION,
        /* Class methods. */
        .ctlreq = g_part_ctlreq,
        .destroy_geom = g_part_destroy_geom,
        .fini = g_part_fini,
        .init = g_part_init,
        .taste = g_part_taste,
        /* Geom methods. */
        .access = g_part_access,
        .dumpconf = g_part_dumpconf,
        .orphan = g_part_orphan,
        .spoiled = g_part_spoiled,
        .start = g_part_start,
        .resize = g_part_resize,
        .ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);
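
/*
 * The three knobs above are plain sysctls and, being CTLFLAG_RWTUN,
 * can also be set as loader tunables.  For example (illustrative):
 *
 *      sysctl kern.geom.part.check_integrity=0
 *
 * or kern.geom.part.check_integrity="0" in loader.conf(5).
 */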
/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
        int i;

        for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
                if (g_part_alias_list[i].alias != alias)
                        continue;
                return (g_part_alias_list[i].lexeme);
        }

        return (NULL);
}

void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
        static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
        off_t chs, cylinders;
        u_int heads;
        int idx;

        *bestchs = 0;
        *bestheads = 0;
        for (idx = 0; candidate_heads[idx] != 0; idx++) {
                heads = candidate_heads[idx];
                cylinders = blocks / heads / sectors;
                if (cylinders < heads || cylinders < sectors)
                        break;
                if (cylinders > 1023)
                        continue;
                chs = cylinders * heads * sectors;
                if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
                        *bestchs = chs;
                        *bestheads = heads;
                }
        }
}
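
/*
 * Worked example for g_part_geometry_heads() above (illustrative,
 * assuming 512-byte sectors): a 1 GiB disk has 2097152 blocks.  With
 * sectors = 63, head counts up to 32 leave more than 1023 cylinders
 * and are skipped; 64 heads gives 2097152 / 64 / 63 = 520 cylinders
 * and a capacity of 520 * 64 * 63 = 2096640 blocks, which becomes
 * *bestchs/*bestheads.  128 heads ties at 2096640 and loses, and at
 * 255 heads the cylinder count (130) drops below the head count, so
 * the loop stops.
 */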
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
        static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
        off_t chs, bestchs;
        u_int heads, sectors;
        int idx;

        if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
            g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
                table->gpt_fixgeom = 0;
                table->gpt_heads = 0;
                table->gpt_sectors = 0;
                bestchs = 0;
                for (idx = 0; candidate_sectors[idx] != 0; idx++) {
                        sectors = candidate_sectors[idx];
                        g_part_geometry_heads(blocks, sectors, &chs, &heads);
                        if (chs == 0)
                                continue;
                        /*
                         * Prefer a geometry with sectors > 1, but only if
                         * it doesn't bump down the number of heads to 1.
                         */
                        if (chs > bestchs || (chs == bestchs && heads > 1 &&
                            table->gpt_sectors == 1)) {
                                bestchs = chs;
                                table->gpt_heads = heads;
                                table->gpt_sectors = sectors;
                        }
                }
                /*
                 * If we didn't find a geometry at all, then the disk is
                 * too big. This means we can use the maximum number of
                 * heads and sectors.
                 */
                if (bestchs == 0) {
                        table->gpt_heads = 255;
                        table->gpt_sectors = 63;
                }
        } else {
                table->gpt_fixgeom = 1;
                table->gpt_heads = heads;
                table->gpt_sectors = sectors;
        }
}

static void
g_part_get_physpath_done(struct bio *bp)
{
        struct g_geom *gp;
        struct g_part_entry *entry;
        struct g_part_table *table;
        struct g_provider *pp;
        struct bio *pbp;

        pbp = bp->bio_parent;
        pp = pbp->bio_to;
        gp = pp->geom;
        table = gp->softc;
        entry = pp->private;

        if (bp->bio_error == 0) {
                char *end;
                size_t len, remainder;
                len = strlcat(bp->bio_data, "/", bp->bio_length);
                if (len < bp->bio_length) {
                        end = bp->bio_data + len;
                        remainder = bp->bio_length - len;
                        G_PART_NAME(table, entry, end, remainder);
                }
        }
        g_std_done(bp);
}

#define DPRINTF(...)    if (bootverbose) {      \
        printf("GEOM_PART: " __VA_ARGS__);      \
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
        struct g_part_entry *e1, *e2;
        struct g_provider *pp;
        off_t offset;
        int failed;

        failed = 0;
        pp = cp->provider;
        if (table->gpt_last < table->gpt_first) {
                DPRINTF("last LBA is below first LBA: %jd < %jd\n",
                    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
                failed++;
        }
        if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
                DPRINTF("last LBA extends beyond mediasize: "
                    "%jd > %jd\n", (intmax_t)table->gpt_last,
                    (intmax_t)pp->mediasize / pp->sectorsize - 1);
                failed++;
        }
        LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
                if (e1->gpe_deleted || e1->gpe_internal)
                        continue;
                if (e1->gpe_start < table->gpt_first) {
                        DPRINTF("partition %d has start offset below first "
                            "LBA: %jd < %jd\n", e1->gpe_index,
                            (intmax_t)e1->gpe_start,
                            (intmax_t)table->gpt_first);
                        failed++;
                }
                if (e1->gpe_start > table->gpt_last) {
                        DPRINTF("partition %d has start offset beyond last "
                            "LBA: %jd > %jd\n", e1->gpe_index,
                            (intmax_t)e1->gpe_start,
                            (intmax_t)table->gpt_last);
                        failed++;
                }
                if (e1->gpe_end < e1->gpe_start) {
                        DPRINTF("partition %d has end offset below start "
                            "offset: %jd < %jd\n", e1->gpe_index,
                            (intmax_t)e1->gpe_end,
                            (intmax_t)e1->gpe_start);
                        failed++;
                }
                if (e1->gpe_end > table->gpt_last) {
                        DPRINTF("partition %d has end offset beyond last "
                            "LBA: %jd > %jd\n", e1->gpe_index,
                            (intmax_t)e1->gpe_end,
                            (intmax_t)table->gpt_last);
                        failed++;
                }
                if (pp->stripesize > 0) {
                        offset = e1->gpe_start * pp->sectorsize;
                        if (e1->gpe_offset > offset)
                                offset = e1->gpe_offset;
                        if ((offset + pp->stripeoffset) % pp->stripesize) {
                                DPRINTF("partition %d on (%s, %s) is not "
                                    "aligned on %ju bytes\n", e1->gpe_index,
                                    pp->name, table->gpt_scheme->name,
                                    (uintmax_t)pp->stripesize);
                                /* Don't treat this as a critical failure */
                        }
                }
                e2 = e1;
                while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
                        if (e2->gpe_deleted || e2->gpe_internal)
                                continue;
                        if (e1->gpe_start >= e2->gpe_start &&
                            e1->gpe_start <= e2->gpe_end) {
                                DPRINTF("partition %d has start offset inside "
                                    "partition %d: start[%d] %jd >= start[%d] "
                                    "%jd <= end[%d] %jd\n",
                                    e1->gpe_index, e2->gpe_index,
                                    e2->gpe_index, (intmax_t)e2->gpe_start,
                                    e1->gpe_index, (intmax_t)e1->gpe_start,
                                    e2->gpe_index, (intmax_t)e2->gpe_end);
                                failed++;
                        }
                        if (e1->gpe_end >= e2->gpe_start &&
                            e1->gpe_end <= e2->gpe_end) {
                                DPRINTF("partition %d has end offset inside "
                                    "partition %d: start[%d] %jd >= end[%d] "
                                    "%jd <= end[%d] %jd\n",
                                    e1->gpe_index, e2->gpe_index,
                                    e2->gpe_index, (intmax_t)e2->gpe_start,
                                    e1->gpe_index, (intmax_t)e1->gpe_end,
                                    e2->gpe_index, (intmax_t)e2->gpe_end);
                                failed++;
                        }
                        if (e1->gpe_start < e2->gpe_start &&
                            e1->gpe_end > e2->gpe_end) {
                                DPRINTF("partition %d contains partition %d: "
                                    "start[%d] %jd > start[%d] %jd, end[%d] "
                                    "%jd < end[%d] %jd\n",
                                    e1->gpe_index, e2->gpe_index,
                                    e1->gpe_index, (intmax_t)e1->gpe_start,
                                    e2->gpe_index, (intmax_t)e2->gpe_start,
                                    e2->gpe_index, (intmax_t)e2->gpe_end,
                                    e1->gpe_index, (intmax_t)e1->gpe_end);
                                failed++;
                        }
                }
        }
        if (failed != 0) {
                printf("GEOM_PART: integrity check failed (%s, %s)\n",
                    pp->name, table->gpt_scheme->name);
                if (check_integrity != 0)
                        return (EINVAL);
                table->gpt_corrupt = 1;
        }
        return (0);
}
#undef DPRINTF
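
/*
 * Find the entry with the given index, or create one if it does not
 * exist.  The entry list is kept sorted by index; an existing entry
 * with a matching index is reused in place (its gpe_offset reset),
 * otherwise a new entry of the scheme's entry size is allocated and
 * linked in at its sorted position.  Start and end are LBAs.
 */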
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
        struct g_part_entry *entry, *last;

        last = NULL;
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (entry->gpe_index == index)
                        break;
                if (entry->gpe_index > index) {
                        entry = NULL;
                        break;
                }
                last = entry;
        }
        if (entry == NULL) {
                entry = g_malloc(table->gpt_scheme->gps_entrysz,
                    M_WAITOK | M_ZERO);
                entry->gpe_index = index;
                if (last == NULL)
                        LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
                else
                        LIST_INSERT_AFTER(last, entry, gpe_entry);
        } else
                entry->gpe_offset = 0;
        entry->gpe_start = start;
        entry->gpe_end = end;
        return (entry);
}

static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
        struct g_consumer *cp;
        struct g_provider *pp;
        struct sbuf *sb;
        struct g_geom_alias *gap;
        off_t offset;

        cp = LIST_FIRST(&gp->consumer);
        pp = cp->provider;

        offset = entry->gpe_start * pp->sectorsize;
        if (entry->gpe_offset < offset)
                entry->gpe_offset = offset;

        if (entry->gpe_pp == NULL) {
                /*
                 * Add aliases to the geom before we create the provider so that
                 * geom_dev can taste it with all the aliases in place so all
                 * the aliased dev_t instances get created for each partition
                 * (eg foo5p7 gets created for bar5p7 when foo is an alias of bar).
                 */
                LIST_FOREACH(gap, &table->gpt_gp->aliases, ga_next) {
                        sb = sbuf_new_auto();
                        G_PART_FULLNAME(table, entry, sb, gap->ga_alias);
                        sbuf_finish(sb);
                        g_geom_add_alias(gp, sbuf_data(sb));
                        sbuf_delete(sb);
                }
                sb = sbuf_new_auto();
                G_PART_FULLNAME(table, entry, sb, gp->name);
                sbuf_finish(sb);
                entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
                sbuf_delete(sb);
                entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
                entry->gpe_pp->private = entry;         /* Close the circle. */
        }
        entry->gpe_pp->index = entry->gpe_index - 1;    /* index is 1-based. */
        entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
            pp->sectorsize;
        entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
        entry->gpe_pp->sectorsize = pp->sectorsize;
        entry->gpe_pp->stripesize = pp->stripesize;
        entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
        if (pp->stripesize > 0)
                entry->gpe_pp->stripeoffset %= pp->stripesize;
        entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
        g_error_provider(entry->gpe_pp, 0);
}
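
/*
 * The name passed to g_new_providerf() above comes from the scheme's
 * G_PART_FULLNAME() method, so the suffix is scheme-specific; e.g.
 * (illustrative) entry 2 on "da0" typically becomes "da0p2" under GPT
 * and "da0s2" under MBR.  The same naming is applied to every alias
 * of the underlying disk.
 */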
static struct g_geom *
g_part_find_geom(const char *name)
{
        struct g_geom *gp;
        LIST_FOREACH(gp, &g_part_class.geom, geom) {
                if ((gp->flags & G_GEOM_WITHER) == 0 &&
                    strcmp(name, gp->name) == 0)
                        break;
        }
        return (gp);
}

static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
        struct g_geom *gp;
        const char *gname;

        gname = gctl_get_asciiparam(req, name);
        if (gname == NULL)
                return (ENOATTR);
        if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
                gname += sizeof(_PATH_DEV) - 1;
        gp = g_part_find_geom(gname);
        if (gp == NULL) {
                gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
                return (EINVAL);
        }
        *v = gp;
        return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
        struct g_provider *pp;
        const char *pname;

        pname = gctl_get_asciiparam(req, name);
        if (pname == NULL)
                return (ENOATTR);
        if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
                pname += sizeof(_PATH_DEV) - 1;
        pp = g_provider_by_name(pname);
        if (pp == NULL) {
                gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
                return (EINVAL);
        }
        *v = pp;
        return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
        const char *p;
        char *x;
        quad_t q;

        p = gctl_get_asciiparam(req, name);
        if (p == NULL)
                return (ENOATTR);
        q = strtoq(p, &x, 0);
        if (*x != '\0' || q < 0) {
                gctl_error(req, "%d %s '%s'", EINVAL, name, p);
                return (EINVAL);
        }
        *v = q;
        return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
        struct g_part_scheme *s;
        const char *p;

        p = gctl_get_asciiparam(req, name);
        if (p == NULL)
                return (ENOATTR);
        TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
                if (s == &g_part_null_scheme)
                        continue;
                if (!strcasecmp(s->name, p))
                        break;
        }
        if (s == NULL) {
                gctl_error(req, "%d %s '%s'", EINVAL, name, p);
                return (EINVAL);
        }
        *v = s;
        return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
        const char *p;

        p = gctl_get_asciiparam(req, name);
        if (p == NULL)
                return (ENOATTR);
        /* An empty label is always valid. */
        if (strcmp(name, "label") != 0 && p[0] == '\0') {
                gctl_error(req, "%d %s '%s'", EINVAL, name, p);
                return (EINVAL);
        }
        *v = p;
        return (0);
}
static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
        const intmax_t *p;
        int size;

        p = gctl_get_param(req, name, &size);
        if (p == NULL)
                return (ENOATTR);
        if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
                gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
                return (EINVAL);
        }
        *v = (u_int)*p;
        return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
        const uint32_t *p;
        int size;

        p = gctl_get_param(req, name, &size);
        if (p == NULL)
                return (ENOATTR);
        if (size != sizeof(*p) || *p > INT_MAX) {
                gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
                return (EINVAL);
        }
        *v = (u_int)*p;
        return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
        const void *p;
        int size;

        p = gctl_get_param(req, name, &size);
        if (p == NULL)
                return (ENOATTR);
        *v = p;
        *s = size;
        return (0);
}

static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
        struct g_part_scheme *iter, *scheme;
        struct g_part_table *table;
        int pri, probe;

        table = gp->softc;
        scheme = (table != NULL) ? table->gpt_scheme : NULL;
        pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
        if (pri == 0)
                goto done;
        if (pri > 0) {  /* error */
                scheme = NULL;
                pri = INT_MIN;
        }

        TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
                if (iter == &g_part_null_scheme)
                        continue;
                table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
                    M_WAITOK);
                table->gpt_gp = gp;
                table->gpt_scheme = iter;
                table->gpt_depth = depth;
                probe = G_PART_PROBE(table, cp);
                if (probe <= 0 && probe > pri) {
                        pri = probe;
                        scheme = iter;
                        if (gp->softc != NULL)
                                kobj_delete((kobj_t)gp->softc, M_GEOM);
                        gp->softc = table;
                        if (pri == 0)
                                goto done;
                } else
                        kobj_delete((kobj_t)table, M_GEOM);
        }

done:
        return ((scheme == NULL) ? ENXIO : 0);
}
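
/*
 * The probe loop above encodes the G_PART_PROBE() convention: 0 means
 * a definitive match, a negative value is a weaker match with values
 * closer to zero preferred, and a positive value indicates an error.
 * INT_MIN therefore acts as the "no match yet" sentinel.
 */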
/*
 * Control request functions.
 */

static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_part_entry *delent, *last, *entry;
        struct g_part_table *table;
        struct sbuf *sb;
        quad_t end;
        unsigned int index;
        int error;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        pp = LIST_FIRST(&gp->consumer)->provider;
        table = gp->softc;
        end = gpp->gpp_start + gpp->gpp_size - 1;

        if (gpp->gpp_start < table->gpt_first ||
            gpp->gpp_start > table->gpt_last) {
                gctl_error(req, "%d start '%jd'", EINVAL,
                    (intmax_t)gpp->gpp_start);
                return (EINVAL);
        }
        if (end < gpp->gpp_start || end > table->gpt_last) {
                gctl_error(req, "%d size '%jd'", EINVAL,
                    (intmax_t)gpp->gpp_size);
                return (EINVAL);
        }
        if (gpp->gpp_index > table->gpt_entries) {
                gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
                return (EINVAL);
        }

        delent = last = NULL;
        index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (entry->gpe_deleted) {
                        if (entry->gpe_index == index)
                                delent = entry;
                        continue;
                }
                if (entry->gpe_index == index)
                        index = entry->gpe_index + 1;
                if (entry->gpe_index < index)
                        last = entry;
                if (entry->gpe_internal)
                        continue;
                if (gpp->gpp_start >= entry->gpe_start &&
                    gpp->gpp_start <= entry->gpe_end) {
                        gctl_error(req, "%d start '%jd'", ENOSPC,
                            (intmax_t)gpp->gpp_start);
                        return (ENOSPC);
                }
                if (end >= entry->gpe_start && end <= entry->gpe_end) {
                        gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
                        return (ENOSPC);
                }
                if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
                        gctl_error(req, "%d size '%jd'", ENOSPC,
                            (intmax_t)gpp->gpp_size);
                        return (ENOSPC);
                }
        }
        if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
                gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
                return (EEXIST);
        }
        if (index > table->gpt_entries) {
                gctl_error(req, "%d index '%d'", ENOSPC, index);
                return (ENOSPC);
        }

        entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
            M_WAITOK | M_ZERO) : delent;
        entry->gpe_index = index;
        entry->gpe_start = gpp->gpp_start;
        entry->gpe_end = end;
        error = G_PART_ADD(table, entry, gpp);
        if (error) {
                gctl_error(req, "%d", error);
                if (delent == NULL)
                        g_free(entry);
                return (error);
        }
        if (delent == NULL) {
                if (last == NULL)
                        LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
                else
                        LIST_INSERT_AFTER(last, entry, gpe_entry);
                entry->gpe_created = 1;
        } else {
                entry->gpe_deleted = 0;
                entry->gpe_modified = 1;
        }
        g_part_new_provider(gp, table, entry);

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                G_PART_FULLNAME(table, entry, sb, gp->name);
                if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
                        sbuf_printf(sb, " added, but partition is not "
                            "aligned on %ju bytes\n", (uintmax_t)pp->stripesize);
                else
                        sbuf_cat(sb, " added\n");
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);
}
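
/*
 * Example (illustrative): a userland request such as
 *
 *      gpart add -t freebsd-ufs -b 40 -s 1g da0
 *
 * arrives via g_part_ctlreq() below as the "add" verb, carrying the
 * mandatory geom/start/size/type parameters validated above.
 */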
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_geom *gp;
        struct g_part_table *table;
        struct sbuf *sb;
        int error, sz;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        table = gp->softc;
        sz = table->gpt_scheme->gps_bootcodesz;
        if (sz == 0) {
                error = ENODEV;
                goto fail;
        }
        if (gpp->gpp_codesize > sz) {
                error = EFBIG;
                goto fail;
        }

        error = G_PART_BOOTCODE(table, gpp);
        if (error)
                goto fail;

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                sbuf_printf(sb, "bootcode written to %s\n", gp->name);
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);

fail:
        gctl_error(req, "%d", error);
        return (error);
}

static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_consumer *cp;
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_part_entry *entry, *tmp;
        struct g_part_table *table;
        char *buf;
        int error, i;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        table = gp->softc;
        if (!table->gpt_opened) {
                gctl_error(req, "%d", EPERM);
                return (EPERM);
        }

        g_topology_unlock();

        cp = LIST_FIRST(&gp->consumer);
        if ((table->gpt_smhead | table->gpt_smtail) != 0) {
                pp = cp->provider;
                buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
                while (table->gpt_smhead != 0) {
                        i = ffs(table->gpt_smhead) - 1;
                        error = g_write_data(cp, i * pp->sectorsize, buf,
                            pp->sectorsize);
                        if (error) {
                                g_free(buf);
                                goto fail;
                        }
                        table->gpt_smhead &= ~(1 << i);
                }
                while (table->gpt_smtail != 0) {
                        i = ffs(table->gpt_smtail) - 1;
                        error = g_write_data(cp, pp->mediasize - (i + 1) *
                            pp->sectorsize, buf, pp->sectorsize);
                        if (error) {
                                g_free(buf);
                                goto fail;
                        }
                        table->gpt_smtail &= ~(1 << i);
                }
                g_free(buf);
        }

        if (table->gpt_scheme == &g_part_null_scheme) {
                g_topology_lock();
                g_access(cp, -1, -1, -1);
                g_part_wither(gp, ENXIO);
                return (0);
        }

        error = G_PART_WRITE(table, cp);
        if (error)
                goto fail;

        LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
                if (!entry->gpe_deleted) {
                        /* Notify consumers that provider might be changed. */
                        if (entry->gpe_modified && (
                            entry->gpe_pp->acw + entry->gpe_pp->ace +
                            entry->gpe_pp->acr) == 0)
                                g_media_changed(entry->gpe_pp, M_NOWAIT);
                        entry->gpe_created = 0;
                        entry->gpe_modified = 0;
                        continue;
                }
                LIST_REMOVE(entry, gpe_entry);
                g_free(entry);
        }
        table->gpt_created = 0;
        table->gpt_opened = 0;

        g_topology_lock();
        g_access(cp, -1, -1, -1);
        return (0);

fail:
        g_topology_lock();
        gctl_error(req, "%d", error);
        return (error);
}
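
/*
 * A note on g_part_ctl_commit() above: gpt_smhead and gpt_smtail are
 * bitmaps of sectors the scheme wants scrubbed at the head and tail
 * of the media; bit i selects sector i counted from the respective
 * end, and each selected sector is overwritten with zeroes before
 * the new table is written out with G_PART_WRITE().
 */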
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_consumer *cp;
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_part_scheme *scheme;
        struct g_part_table *null, *table;
        struct sbuf *sb;
        int attr, error;

        pp = gpp->gpp_provider;
        scheme = gpp->gpp_scheme;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
        g_topology_assert();

        /* Check that there isn't already a g_part geom on the provider. */
        gp = g_part_find_geom(pp->name);
        if (gp != NULL) {
                null = gp->softc;
                if (null->gpt_scheme != &g_part_null_scheme) {
                        gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
                        return (EEXIST);
                }
        } else
                null = NULL;

        if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
            (gpp->gpp_entries < scheme->gps_minent ||
            gpp->gpp_entries > scheme->gps_maxent)) {
                gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
                return (EINVAL);
        }

        if (null == NULL)
                gp = g_new_geomf(&g_part_class, "%s", pp->name);
        gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
            M_WAITOK);
        table = gp->softc;
        table->gpt_gp = gp;
        table->gpt_scheme = gpp->gpp_scheme;
        table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
            gpp->gpp_entries : scheme->gps_minent;
        LIST_INIT(&table->gpt_entry);
        if (null == NULL) {
                cp = g_new_consumer(gp);
                cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
                error = g_attach(cp, pp);
                if (error == 0)
                        error = g_access(cp, 1, 1, 1);
                if (error != 0) {
                        g_part_wither(gp, error);
                        gctl_error(req, "%d geom '%s'", error, pp->name);
                        return (error);
                }
                table->gpt_opened = 1;
        } else {
                cp = LIST_FIRST(&gp->consumer);
                table->gpt_opened = null->gpt_opened;
                table->gpt_smhead = null->gpt_smhead;
                table->gpt_smtail = null->gpt_smtail;
        }

        g_topology_unlock();

        /* Make sure the provider has media. */
        if (pp->mediasize == 0 || pp->sectorsize == 0) {
                error = ENODEV;
                goto fail;
        }

        /* Make sure we can nest and if so, determine our depth. */
        error = g_getattr("PART::isleaf", cp, &attr);
        if (!error && attr) {
                error = ENODEV;
                goto fail;
        }
        error = g_getattr("PART::depth", cp, &attr);
        table->gpt_depth = (!error) ? attr + 1 : 0;

        /*
         * Synthesize a disk geometry. Some partitioning schemes
         * depend on it and since some file systems need it even
         * when the partition scheme doesn't, we do it here in
         * scheme-independent code.
         */
        g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

        error = G_PART_CREATE(table, gpp);
        if (error)
                goto fail;

        g_topology_lock();

        table->gpt_created = 1;
        if (null != NULL)
                kobj_delete((kobj_t)null, M_GEOM);

        /*
         * Support automatic commit by filling in the gpp_geom
         * parameter.
         */
        gpp->gpp_parms |= G_PART_PARM_GEOM;
        gpp->gpp_geom = gp;

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                sbuf_printf(sb, "%s created\n", gp->name);
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);

fail:
        g_topology_lock();
        if (null == NULL) {
                g_access(cp, -1, -1, -1);
                g_part_wither(gp, error);
        } else {
                kobj_delete((kobj_t)gp->softc, M_GEOM);
                gp->softc = null;
        }
        gctl_error(req, "%d provider", error);
        return (error);
}
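
/*
 * Example (illustrative): "gpart create -s gpt da0" reaches the
 * function above as the "create" verb with provider "da0" and scheme
 * "gpt"; on success gpp_geom is filled in so that a 'C' flag in the
 * request can trigger the automatic commit in g_part_ctlreq().
 */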
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_part_entry *entry;
        struct g_part_table *table;
        struct sbuf *sb;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        table = gp->softc;

        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (entry->gpe_deleted || entry->gpe_internal)
                        continue;
                if (entry->gpe_index == gpp->gpp_index)
                        break;
        }
        if (entry == NULL) {
                gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
                return (ENOENT);
        }

        pp = entry->gpe_pp;
        if (pp != NULL) {
                if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
                        gctl_error(req, "%d", EBUSY);
                        return (EBUSY);
                }

                pp->private = NULL;
                entry->gpe_pp = NULL;
        }

        if (pp != NULL)
                g_wither_provider(pp, ENXIO);

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                G_PART_FULLNAME(table, entry, sb, gp->name);
                sbuf_cat(sb, " deleted\n");
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }

        if (entry->gpe_created) {
                LIST_REMOVE(entry, gpe_entry);
                g_free(entry);
        } else {
                entry->gpe_modified = 0;
                entry->gpe_deleted = 1;
        }
        return (0);
}
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_consumer *cp;
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_part_entry *entry, *tmp;
        struct g_part_table *null, *table;
        struct sbuf *sb;
        int error;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        table = gp->softc;
        /* Check for busy providers. */
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (entry->gpe_deleted || entry->gpe_internal)
                        continue;
                if (gpp->gpp_force) {
                        pp = entry->gpe_pp;
                        if (pp == NULL)
                                continue;
                        if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
                                continue;
                }
                gctl_error(req, "%d", EBUSY);
                return (EBUSY);
        }

        if (gpp->gpp_force) {
                /* Destroy all providers. */
                LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
                        pp = entry->gpe_pp;
                        if (pp != NULL) {
                                pp->private = NULL;
                                g_wither_provider(pp, ENXIO);
                        }
                        LIST_REMOVE(entry, gpe_entry);
                        g_free(entry);
                }
        }

        error = G_PART_DESTROY(table, gpp);
        if (error) {
                gctl_error(req, "%d", error);
                return (error);
        }

        gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
            M_WAITOK);
        null = gp->softc;
        null->gpt_gp = gp;
        null->gpt_scheme = &g_part_null_scheme;
        LIST_INIT(&null->gpt_entry);

        cp = LIST_FIRST(&gp->consumer);
        pp = cp->provider;
        null->gpt_last = pp->mediasize / pp->sectorsize - 1;

        null->gpt_depth = table->gpt_depth;
        null->gpt_opened = table->gpt_opened;
        null->gpt_smhead = table->gpt_smhead;
        null->gpt_smtail = table->gpt_smtail;

        while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
                LIST_REMOVE(entry, gpe_entry);
                g_free(entry);
        }
        kobj_delete((kobj_t)table, M_GEOM);

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                sbuf_printf(sb, "%s destroyed\n", gp->name);
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_geom *gp;
        struct g_part_entry *entry;
        struct g_part_table *table;
        struct sbuf *sb;
        int error;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        table = gp->softc;

        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (entry->gpe_deleted || entry->gpe_internal)
                        continue;
                if (entry->gpe_index == gpp->gpp_index)
                        break;
        }
        if (entry == NULL) {
                gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
                return (ENOENT);
        }

        error = G_PART_MODIFY(table, entry, gpp);
        if (error) {
                gctl_error(req, "%d", error);
                return (error);
        }

        if (!entry->gpe_created)
                entry->gpe_modified = 1;

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                G_PART_FULLNAME(table, entry, sb, gp->name);
                sbuf_cat(sb, " modified\n");
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
        gctl_error(req, "%d verb 'move'", ENOSYS);
        return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_part_table *table;
        struct g_geom *gp;
        struct sbuf *sb;
        int error, recovered;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();
        table = gp->softc;
        error = recovered = 0;

        if (table->gpt_corrupt) {
                error = G_PART_RECOVER(table);
                if (error == 0)
                        error = g_part_check_integrity(table,
                            LIST_FIRST(&gp->consumer));
                if (error) {
                        gctl_error(req, "%d recovering '%s' failed",
                            error, gp->name);
                        return (error);
                }
                recovered = 1;
        }
        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                if (recovered)
                        sbuf_printf(sb, "%s recovered\n", gp->name);
                else
                        sbuf_printf(sb, "%s recovering is not needed\n",
                            gp->name);
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);
}
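
/*
 * On the recover path above: gpt_corrupt is set by
 * g_part_check_integrity() when a damaged table is tolerated (i.e.
 * when the kern.geom.part.check_integrity knob is off), and a request
 * such as "gpart recover da0" (illustrative) asks the scheme to
 * rebuild its metadata via G_PART_RECOVER() and then re-verifies the
 * result.
 */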
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_part_entry *pe, *entry;
        struct g_part_table *table;
        struct sbuf *sb;
        quad_t end;
        int error;
        off_t mediasize;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();
        table = gp->softc;

        /* check gpp_index */
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (entry->gpe_deleted || entry->gpe_internal)
                        continue;
                if (entry->gpe_index == gpp->gpp_index)
                        break;
        }
        if (entry == NULL) {
                gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
                return (ENOENT);
        }

        /* check gpp_size */
        end = entry->gpe_start + gpp->gpp_size - 1;
        if (gpp->gpp_size < 1 || end > table->gpt_last) {
                gctl_error(req, "%d size '%jd'", EINVAL,
                    (intmax_t)gpp->gpp_size);
                return (EINVAL);
        }

        LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
                if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
                        continue;
                if (end >= pe->gpe_start && end <= pe->gpe_end) {
                        gctl_error(req, "%d end '%jd'", ENOSPC,
                            (intmax_t)end);
                        return (ENOSPC);
                }
                if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
                        gctl_error(req, "%d size '%jd'", ENOSPC,
                            (intmax_t)gpp->gpp_size);
                        return (ENOSPC);
                }
        }

        pp = entry->gpe_pp;
        if ((g_debugflags & 16) == 0 &&
            (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
                if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
                        /* Deny shrinking of an opened partition. */
                        gctl_error(req, "%d", EBUSY);
                        return (EBUSY);
                }
        }

        error = G_PART_RESIZE(table, entry, gpp);
        if (error) {
                gctl_error(req, "%d%s", error, error != EBUSY ? "":
                    " resizing will lead to unexpected shrinking"
                    " due to alignment");
                return (error);
        }

        if (!entry->gpe_created)
                entry->gpe_modified = 1;

        /* update mediasize of changed provider */
        mediasize = (entry->gpe_end - entry->gpe_start + 1) *
            pp->sectorsize;
        g_resize_provider(pp, mediasize);

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                G_PART_FULLNAME(table, entry, sb, gp->name);
                sbuf_cat(sb, " resized\n");
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);
}
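
/*
 * Two details of the resize path above: an open partition may be
 * grown but never shrunk, unless the 0x10 bit (commonly known as the
 * "foot-shooting" flag) is set in g_debugflags, and G_PART_RESIZE()
 * may fail with EBUSY when alignment would force an unexpected
 * shrink, which is why that errno gets the longer explanation.
 */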
static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
        struct g_geom *gp;
        struct g_part_entry *entry;
        struct g_part_table *table;
        struct sbuf *sb;
        int error;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        table = gp->softc;

        if (gpp->gpp_parms & G_PART_PARM_INDEX) {
                LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                        if (entry->gpe_deleted || entry->gpe_internal)
                                continue;
                        if (entry->gpe_index == gpp->gpp_index)
                                break;
                }
                if (entry == NULL) {
                        gctl_error(req, "%d index '%d'", ENOENT,
                            gpp->gpp_index);
                        return (ENOENT);
                }
        } else
                entry = NULL;

        error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
        if (error) {
                gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
                return (error);
        }

        /* Provide feedback if so requested. */
        if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
                sb = sbuf_new_auto();
                sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
                    (set) ? "" : "un");
                if (entry)
                        G_PART_FULLNAME(table, entry, sb, gp->name);
                else
                        sbuf_cat(sb, gp->name);
                sbuf_cat(sb, "\n");
                sbuf_finish(sb);
                gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
                sbuf_delete(sb);
        }
        return (0);
}
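
/*
 * Attribute names in the function above are interpreted by the
 * scheme's G_PART_SETUNSET() method, so they are scheme-specific.
 * For example (illustrative), "gpart set -a active -i 1 da0" would
 * set the MBR scheme's "active" attribute on partition 1, while
 * "gpart unset" follows the same path with set == 0.
 */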
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
        struct g_consumer *cp;
        struct g_provider *pp;
        struct g_geom *gp;
        struct g_part_entry *entry, *tmp;
        struct g_part_table *table;
        int error, reprobe;

        gp = gpp->gpp_geom;
        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
        g_topology_assert();

        table = gp->softc;
        if (!table->gpt_opened) {
                gctl_error(req, "%d", EPERM);
                return (EPERM);
        }

        cp = LIST_FIRST(&gp->consumer);
        LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
                entry->gpe_modified = 0;
                if (entry->gpe_created) {
                        pp = entry->gpe_pp;
                        if (pp != NULL) {
                                pp->private = NULL;
                                entry->gpe_pp = NULL;
                                g_wither_provider(pp, ENXIO);
                        }
                        entry->gpe_deleted = 1;
                }
                if (entry->gpe_deleted) {
                        LIST_REMOVE(entry, gpe_entry);
                        g_free(entry);
                }
        }

        g_topology_unlock();

        reprobe = (table->gpt_scheme == &g_part_null_scheme ||
            table->gpt_created) ? 1 : 0;

        if (reprobe) {
                LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                        if (entry->gpe_internal)
                                continue;
                        error = EBUSY;
                        goto fail;
                }
                while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
                        LIST_REMOVE(entry, gpe_entry);
                        g_free(entry);
                }
                error = g_part_probe(gp, cp, table->gpt_depth);
                if (error) {
                        g_topology_lock();
                        g_access(cp, -1, -1, -1);
                        g_part_wither(gp, error);
                        return (0);
                }
                table = gp->softc;

                /*
                 * Synthesize a disk geometry. Some partitioning schemes
                 * depend on it and since some file systems need it even
                 * when the partition scheme doesn't, we do it here in
                 * scheme-independent code.
                 */
                pp = cp->provider;
                g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
        }

        error = G_PART_READ(table, cp);
        if (error)
                goto fail;
        error = g_part_check_integrity(table, cp);
        if (error)
                goto fail;

        g_topology_lock();
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (!entry->gpe_internal)
                        g_part_new_provider(gp, table, entry);
        }

        table->gpt_opened = 0;
        g_access(cp, -1, -1, -1);
        return (0);

fail:
        g_topology_lock();
        gctl_error(req, "%d", error);
        return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
        struct g_part_entry *entry;
        struct g_part_table *table;
        struct g_provider *pp;

        table = gp->softc;
        if (table != NULL) {
                gp->softc = NULL;
                while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
                        LIST_REMOVE(entry, gpe_entry);
                        pp = entry->gpe_pp;
                        entry->gpe_pp = NULL;
                        if (pp != NULL) {
                                pp->private = NULL;
                                g_wither_provider(pp, error);
                        }
                        g_free(entry);
                }
                G_PART_DESTROY(table, NULL);
                kobj_delete((kobj_t)table, M_GEOM);
        }
        g_wither_geom(gp, error);
}

/*
 * Class methods.
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
        struct g_part_parms gpp;
        struct g_part_table *table;
        struct gctl_req_arg *ap;
        enum g_part_ctl ctlreq;
        unsigned int i, mparms, oparms, parm;
        int auto_commit, close_on_error;
        int error, modifies;

        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
        g_topology_assert();

        ctlreq = G_PART_CTL_NONE;
        modifies = 1;
        mparms = 0;
        oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
        switch (*verb) {
        case 'a':
                if (!strcmp(verb, "add")) {
                        ctlreq = G_PART_CTL_ADD;
                        mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
                            G_PART_PARM_START | G_PART_PARM_TYPE;
                        oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
                }
                break;
        case 'b':
                if (!strcmp(verb, "bootcode")) {
                        ctlreq = G_PART_CTL_BOOTCODE;
                        mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
                        oparms |= G_PART_PARM_SKIP_DSN;
                }
                break;
        case 'c':
                if (!strcmp(verb, "commit")) {
                        ctlreq = G_PART_CTL_COMMIT;
                        mparms |= G_PART_PARM_GEOM;
                        modifies = 0;
                } else if (!strcmp(verb, "create")) {
                        ctlreq = G_PART_CTL_CREATE;
                        mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
                        oparms |= G_PART_PARM_ENTRIES;
                }
                break;
        case 'd':
                if (!strcmp(verb, "delete")) {
                        ctlreq = G_PART_CTL_DELETE;
                        mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
                } else if (!strcmp(verb, "destroy")) {
                        ctlreq = G_PART_CTL_DESTROY;
                        mparms |= G_PART_PARM_GEOM;
                        oparms |= G_PART_PARM_FORCE;
                }
                break;
        case 'm':
                if (!strcmp(verb, "modify")) {
                        ctlreq = G_PART_CTL_MODIFY;
                        mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
                        oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
                } else if (!strcmp(verb, "move")) {
                        ctlreq = G_PART_CTL_MOVE;
                        mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
                }
                break;
        case 'r':
                if (!strcmp(verb, "recover")) {
                        ctlreq = G_PART_CTL_RECOVER;
                        mparms |= G_PART_PARM_GEOM;
                } else if (!strcmp(verb, "resize")) {
                        ctlreq = G_PART_CTL_RESIZE;
                        mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
                            G_PART_PARM_SIZE;
                }
                break;
        case 's':
                if (!strcmp(verb, "set")) {
                        ctlreq = G_PART_CTL_SET;
                        mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
                        oparms |= G_PART_PARM_INDEX;
                }
                break;
        case 'u':
                if (!strcmp(verb, "undo")) {
                        ctlreq = G_PART_CTL_UNDO;
                        mparms |= G_PART_PARM_GEOM;
                        modifies = 0;
                } else if (!strcmp(verb, "unset")) {
                        ctlreq = G_PART_CTL_UNSET;
                        mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
                        oparms |= G_PART_PARM_INDEX;
                }
                break;
        }
        if (ctlreq == G_PART_CTL_NONE) {
                gctl_error(req, "%d verb '%s'", EINVAL, verb);
                return;
        }

        bzero(&gpp, sizeof(gpp));
        for (i = 0; i < req->narg; i++) {
                ap = &req->arg[i];
                parm = 0;
                switch (ap->name[0]) {
                case 'a':
                        if (!strcmp(ap->name, "arg0")) {
                                parm = mparms &
                                    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
                        }
                        if (!strcmp(ap->name, "attrib"))
                                parm = G_PART_PARM_ATTRIB;
                        break;
                case 'b':
                        if (!strcmp(ap->name, "bootcode"))
                                parm = G_PART_PARM_BOOTCODE;
                        break;
                case 'c':
                        if (!strcmp(ap->name, "class"))
                                continue;
                        break;
                case 'e':
                        if (!strcmp(ap->name, "entries"))
                                parm = G_PART_PARM_ENTRIES;
                        break;
                case 'f':
                        if (!strcmp(ap->name, "flags"))
                                parm = G_PART_PARM_FLAGS;
                        else if (!strcmp(ap->name, "force"))
                                parm = G_PART_PARM_FORCE;
                        break;
                case 'i':
                        if (!strcmp(ap->name, "index"))
                                parm = G_PART_PARM_INDEX;
                        break;
                case 'l':
                        if (!strcmp(ap->name, "label"))
                                parm = G_PART_PARM_LABEL;
                        break;
                case 'o':
                        if (!strcmp(ap->name, "output"))
                                parm = G_PART_PARM_OUTPUT;
                        break;
                case 's':
                        if (!strcmp(ap->name, "scheme"))
                                parm = G_PART_PARM_SCHEME;
                        else if (!strcmp(ap->name, "size"))
                                parm = G_PART_PARM_SIZE;
                        else if (!strcmp(ap->name, "start"))
                                parm = G_PART_PARM_START;
                        else if (!strcmp(ap->name, "skip_dsn"))
                                parm = G_PART_PARM_SKIP_DSN;
                        break;
                case 't':
                        if (!strcmp(ap->name, "type"))
                                parm = G_PART_PARM_TYPE;
                        break;
                case 'v':
                        if (!strcmp(ap->name, "verb"))
                                continue;
                        else if (!strcmp(ap->name, "version"))
                                parm = G_PART_PARM_VERSION;
                        break;
                }
                if ((parm & (mparms | oparms)) == 0) {
                        gctl_error(req, "%d param '%s'", EINVAL, ap->name);
                        return;
                }
                switch (parm) {
                case G_PART_PARM_ATTRIB:
                        error = g_part_parm_str(req, ap->name,
                            &gpp.gpp_attrib);
                        break;
                case G_PART_PARM_BOOTCODE:
                        error = g_part_parm_bootcode(req, ap->name,
                            &gpp.gpp_codeptr, &gpp.gpp_codesize);
                        break;
                case G_PART_PARM_ENTRIES:
                        error = g_part_parm_intmax(req, ap->name,
                            &gpp.gpp_entries);
                        break;
                case G_PART_PARM_FLAGS:
                        error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
                        break;
                case G_PART_PARM_FORCE:
                        error = g_part_parm_uint32(req, ap->name,
                            &gpp.gpp_force);
                        break;
                case G_PART_PARM_GEOM:
                        error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
                        break;
                case G_PART_PARM_INDEX:
                        error = g_part_parm_intmax(req, ap->name,
                            &gpp.gpp_index);
                        break;
                case G_PART_PARM_LABEL:
                        error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
                        break;
                case G_PART_PARM_OUTPUT:
                        error = 0;      /* Write-only parameter */
                        break;
                case G_PART_PARM_PROVIDER:
                        error = g_part_parm_provider(req, ap->name,
                            &gpp.gpp_provider);
                        break;
                case G_PART_PARM_SCHEME:
                        error = g_part_parm_scheme(req, ap->name,
                            &gpp.gpp_scheme);
                        break;
                case G_PART_PARM_SIZE:
                        error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
                        break;
                case G_PART_PARM_SKIP_DSN:
                        error = g_part_parm_uint32(req, ap->name,
                            &gpp.gpp_skip_dsn);
                        break;
                case G_PART_PARM_START:
                        error = g_part_parm_quad(req, ap->name,
                            &gpp.gpp_start);
                        break;
                case G_PART_PARM_TYPE:
                        error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
                        break;
                case G_PART_PARM_VERSION:
                        error = g_part_parm_uint32(req, ap->name,
                            &gpp.gpp_version);
                        break;
                default:
                        error = EDOOFUS;
                        gctl_error(req, "%d %s", error, ap->name);
                        break;
                }
                if (error != 0) {
                        if (error == ENOATTR) {
                                gctl_error(req, "%d param '%s'", error,
                                    ap->name);
                        }
                        return;
                }
                gpp.gpp_parms |= parm;
        }
        if ((gpp.gpp_parms & mparms) != mparms) {
                parm = mparms - (gpp.gpp_parms & mparms);
                gctl_error(req, "%d param '%x'", ENOATTR, parm);
                return;
        }

        /* Obtain permissions if possible/necessary. */
        close_on_error = 0;
        table = NULL;
        if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
                table = gpp.gpp_geom->softc;
                if (table != NULL && table->gpt_corrupt &&
                    ctlreq != G_PART_CTL_DESTROY &&
                    ctlreq != G_PART_CTL_RECOVER) {
                        gctl_error(req, "%d table '%s' is corrupt",
                            EPERM, gpp.gpp_geom->name);
                        return;
                }
                if (table != NULL && !table->gpt_opened) {
                        error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
                            1, 1, 1);
                        if (error) {
                                gctl_error(req, "%d geom '%s'", error,
                                    gpp.gpp_geom->name);
                                return;
                        }
                        table->gpt_opened = 1;
                        close_on_error = 1;
                }
        }

        /* Allow the scheme to check or modify the parameters. */
        if (table != NULL) {
                error = G_PART_PRECHECK(table, ctlreq, &gpp);
                if (error) {
                        gctl_error(req, "%d pre-check failed", error);
                        goto out;
                }
        } else
                error = EDOOFUS;        /* Prevent bogus uninit. warning. */

        switch (ctlreq) {
        case G_PART_CTL_NONE:
                panic("%s", __func__);
        case G_PART_CTL_ADD:
                error = g_part_ctl_add(req, &gpp);
                break;
        case G_PART_CTL_BOOTCODE:
                error = g_part_ctl_bootcode(req, &gpp);
                break;
        case G_PART_CTL_COMMIT:
                error = g_part_ctl_commit(req, &gpp);
                break;
        case G_PART_CTL_CREATE:
                error = g_part_ctl_create(req, &gpp);
                break;
        case G_PART_CTL_DELETE:
                error = g_part_ctl_delete(req, &gpp);
                break;
        case G_PART_CTL_DESTROY:
                error = g_part_ctl_destroy(req, &gpp);
                break;
        case G_PART_CTL_MODIFY:
                error = g_part_ctl_modify(req, &gpp);
                break;
        case G_PART_CTL_MOVE:
                error = g_part_ctl_move(req, &gpp);
                break;
        case G_PART_CTL_RECOVER:
                error = g_part_ctl_recover(req, &gpp);
                break;
        case G_PART_CTL_RESIZE:
                error = g_part_ctl_resize(req, &gpp);
                break;
        case G_PART_CTL_SET:
                error = g_part_ctl_setunset(req, &gpp, 1);
                break;
        case G_PART_CTL_UNDO:
                error = g_part_ctl_undo(req, &gpp);
                break;
        case G_PART_CTL_UNSET:
                error = g_part_ctl_setunset(req, &gpp, 0);
                break;
        }
        /* Implement automatic commit. */
        if (!error) {
                auto_commit = (modifies &&
                    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
                    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
                if (auto_commit) {
                        KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
                            __func__));
                        error = g_part_ctl_commit(req, &gpp);
                }
        }

out:
        if (error && close_on_error) {
                g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
                table->gpt_opened = 0;
        }
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
        g_topology_assert();

        g_part_wither(gp, EINVAL);
        return (0);
}

static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
        struct g_consumer *cp;
        struct g_geom *gp;
        struct g_part_entry *entry;
        struct g_part_table *table;
        struct root_hold_token *rht;
        struct g_geom_alias *gap;
        int attr, depth;
        int error;

        G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
        g_topology_assert();

        /* Skip providers that are already open for writing. */
        if (pp->acw > 0)
                return (NULL);

        /*
         * Create a GEOM with consumer and hook it up to the provider.
         * With that we become part of the topology. Obtain read access
         * to the provider.
         */
        gp = g_new_geomf(mp, "%s", pp->name);
        LIST_FOREACH(gap, &pp->geom->aliases, ga_next)
                g_geom_add_alias(gp, gap->ga_alias);
        cp = g_new_consumer(gp);
        cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
        error = g_attach(cp, pp);
        if (error == 0)
                error = g_access(cp, 1, 0, 0);
        if (error != 0) {
                if (cp->provider)
                        g_detach(cp);
                g_destroy_consumer(cp);
                g_destroy_geom(gp);
                return (NULL);
        }

        rht = root_mount_hold(mp->name);
        g_topology_unlock();

        /*
         * Short-circuit the whole probing galore when there's no
         * media present.
         */
        if (pp->mediasize == 0 || pp->sectorsize == 0) {
                error = ENODEV;
                goto fail;
        }

        /* Make sure we can nest and if so, determine our depth. */
        error = g_getattr("PART::isleaf", cp, &attr);
        if (!error && attr) {
                error = ENODEV;
                goto fail;
        }
        error = g_getattr("PART::depth", cp, &attr);
        depth = (!error) ? attr + 1 : 0;

        error = g_part_probe(gp, cp, depth);
        if (error)
                goto fail;

        table = gp->softc;

        /*
         * Synthesize a disk geometry. Some partitioning schemes
         * depend on it and since some file systems need it even
         * when the partition scheme doesn't, we do it here in
         * scheme-independent code.
         */
        g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

        error = G_PART_READ(table, cp);
        if (error)
                goto fail;
        error = g_part_check_integrity(table, cp);
        if (error)
                goto fail;

        g_topology_lock();
        LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
                if (!entry->gpe_internal)
                        g_part_new_provider(gp, table, entry);
        }

        root_mount_rel(rht);
        g_access(cp, -1, 0, 0);
        return (gp);

fail:
        g_topology_lock();
        root_mount_rel(rht);
        g_access(cp, -1, 0, 0);
        g_detach(cp);
        g_destroy_consumer(cp);
        g_destroy_geom(gp);
        return (NULL);
}

/*
 * Geom methods.
 */

static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
        struct g_consumer *cp;

        G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
            dw, de));

        cp = LIST_FIRST(&pp->geom->consumer);

        /* We always gain write-exclusive access. */
        return (g_access(cp, dr, dw, dw + de));
}
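
/*
 * The dw + de mapping above means a writer of any partition also
 * holds an exclusive count on the underlying provider: e.g.
 * (illustrative) an r1w1e0 open of da0p1 becomes r1w1e1 on da0, so
 * the raw disk cannot be opened for writing behind the back of an
 * open partition.
 */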
2063 */
2064 
2065 static int
2066 g_part_access(struct g_provider *pp, int dr, int dw, int de)
2067 {
2068 struct g_consumer *cp;
2069 
2070 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
2071 dw, de));
2072 
2073 cp = LIST_FIRST(&pp->geom->consumer);
2074 
2075 /* We always gain write-exclusive access. */
2076 return (g_access(cp, dr, dw, dw + de));
2077 }
2078 
2079 static void
2080 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2081 struct g_consumer *cp, struct g_provider *pp)
2082 {
2083 char buf[64];
2084 struct g_part_entry *entry;
2085 struct g_part_table *table;
2086 
2087 KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
2088 table = gp->softc;
2089 
2090 if (indent == NULL) {
2091 KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
2092 entry = pp->private;
2093 if (entry == NULL)
2094 return;
2095 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
2096 (uintmax_t)entry->gpe_offset,
2097 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2098 /*
2099 * libdisk compatibility quirk - the scheme dumps the
2100 * slicer name and partition type in a way that is
2101 * compatible with libdisk. When libdisk is not used
2102 * anymore, this should go away.
2103 */
2104 G_PART_DUMPCONF(table, entry, sb, indent);
2105 } else if (cp != NULL) { /* Consumer configuration. */
2106 KASSERT(pp == NULL, ("%s", __func__));
2107 /* none */
2108 } else if (pp != NULL) { /* Provider configuration. */
2109 entry = pp->private;
2110 if (entry == NULL)
2111 return;
2112 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
2113 (uintmax_t)entry->gpe_start);
2114 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
2115 (uintmax_t)entry->gpe_end);
2116 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
2117 entry->gpe_index);
2118 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2119 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2120 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
2121 (uintmax_t)entry->gpe_offset);
2122 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
2123 (uintmax_t)pp->mediasize);
2124 G_PART_DUMPCONF(table, entry, sb, indent);
2125 } else { /* Geom configuration. */
2126 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
2127 table->gpt_scheme->name);
2128 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
2129 table->gpt_entries);
2130 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
2131 (uintmax_t)table->gpt_first);
2132 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
2133 (uintmax_t)table->gpt_last);
2134 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
2135 table->gpt_sectors);
2136 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
2137 table->gpt_heads);
2138 sbuf_printf(sb, "%s<state>%s</state>\n", indent,
2139 table->gpt_corrupt ? "CORRUPT": "OK");
2140 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
2141 table->gpt_opened ? "true": "false");
2142 G_PART_DUMPCONF(table, NULL, sb, indent);
2143 }
2144 }
2145 
2146 /*-
2147 * This start routine is only called for non-trivial requests; all the
2148 * trivial ones are handled autonomously by the slice code.
2149 * For requests we handle here, we must call g_io_deliver() on the
2150 * bio, and return non-zero to indicate to the slice code that we did so.
2151 * This code executes in the "DOWN" I/O path; this means:
2152 * No sleeping.
2153 * Don't grab the topology lock.
2154 * * Don't call biowait, g_getattr(), g_setattr() or g_read_data() 2155 */ 2156 static int 2157 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td) 2158 { 2159 struct g_part_table *table; 2160 2161 table = pp->geom->softc; 2162 return G_PART_IOCTL(table, pp, cmd, data, fflag, td); 2163 } 2164 2165 static void 2166 g_part_resize(struct g_consumer *cp) 2167 { 2168 struct g_part_table *table; 2169 2170 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2171 g_topology_assert(); 2172 2173 if (auto_resize == 0) 2174 return; 2175 2176 table = cp->geom->softc; 2177 if (table->gpt_opened == 0) { 2178 if (g_access(cp, 1, 1, 1) != 0) 2179 return; 2180 table->gpt_opened = 1; 2181 } 2182 if (G_PART_RESIZE(table, NULL, NULL) == 0) 2183 printf("GEOM_PART: %s was automatically resized.\n" 2184 " Use `gpart commit %s` to save changes or " 2185 "`gpart undo %s` to revert them.\n", cp->geom->name, 2186 cp->geom->name, cp->geom->name); 2187 if (g_part_check_integrity(table, cp) != 0) { 2188 g_access(cp, -1, -1, -1); 2189 table->gpt_opened = 0; 2190 g_part_wither(table->gpt_gp, ENXIO); 2191 } 2192 } 2193 2194 static void 2195 g_part_orphan(struct g_consumer *cp) 2196 { 2197 struct g_provider *pp; 2198 struct g_part_table *table; 2199 2200 pp = cp->provider; 2201 KASSERT(pp != NULL, ("%s", __func__)); 2202 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2203 g_topology_assert(); 2204 2205 KASSERT(pp->error != 0, ("%s", __func__)); 2206 table = cp->geom->softc; 2207 if (table != NULL && table->gpt_opened) 2208 g_access(cp, -1, -1, -1); 2209 g_part_wither(cp->geom, pp->error); 2210 } 2211 2212 static void 2213 g_part_spoiled(struct g_consumer *cp) 2214 { 2215 2216 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2217 g_topology_assert(); 2218 2219 cp->flags |= G_CF_ORPHAN; 2220 g_part_wither(cp->geom, ENXIO); 2221 } 2222 2223 static void 2224 g_part_start(struct bio *bp) 2225 { 2226 struct bio *bp2; 2227 struct g_consumer *cp; 2228 struct g_geom *gp; 2229 struct g_part_entry *entry; 2230 struct g_part_table *table; 2231 struct g_kerneldump *gkd; 2232 struct g_provider *pp; 2233 void (*done_func)(struct bio *) = g_std_done; 2234 char buf[64]; 2235 2236 biotrack(bp, __func__); 2237 2238 pp = bp->bio_to; 2239 gp = pp->geom; 2240 table = gp->softc; 2241 cp = LIST_FIRST(&gp->consumer); 2242 2243 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2244 pp->name)); 2245 2246 entry = pp->private; 2247 if (entry == NULL) { 2248 g_io_deliver(bp, ENXIO); 2249 return; 2250 } 2251 2252 switch(bp->bio_cmd) { 2253 case BIO_DELETE: 2254 case BIO_READ: 2255 case BIO_WRITE: 2256 if (bp->bio_offset >= pp->mediasize) { 2257 g_io_deliver(bp, EIO); 2258 return; 2259 } 2260 bp2 = g_clone_bio(bp); 2261 if (bp2 == NULL) { 2262 g_io_deliver(bp, ENOMEM); 2263 return; 2264 } 2265 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2266 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2267 bp2->bio_done = g_std_done; 2268 bp2->bio_offset += entry->gpe_offset; 2269 g_io_request(bp2, cp); 2270 return; 2271 case BIO_FLUSH: 2272 break; 2273 case BIO_GETATTR: 2274 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2275 return; 2276 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2277 return; 2278 /* 2279 * allow_nesting overrides "isleaf" to false _unless_ the 2280 * provider offset is zero, since otherwise we would recurse. 
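 * (A partition at offset 0 exposes the same first sectors as its
 * parent, so the parent's own table would be tasted there again,
 * without end.)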
2281 */ 2282 if (g_handleattr_int(bp, "PART::isleaf", 2283 table->gpt_isleaf && 2284 (allow_nesting == 0 || entry->gpe_offset == 0))) 2285 return; 2286 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2287 return; 2288 if (g_handleattr_str(bp, "PART::scheme", 2289 table->gpt_scheme->name)) 2290 return; 2291 if (g_handleattr_str(bp, "PART::type", 2292 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2293 return; 2294 if (!strcmp("GEOM::physpath", bp->bio_attribute)) { 2295 done_func = g_part_get_physpath_done; 2296 break; 2297 } 2298 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2299 /* 2300 * Check that the partition is suitable for kernel 2301 * dumps. Typically only swap partitions should be 2302 * used. If the request comes from the nested scheme 2303 * we allow dumping there as well. 2304 */ 2305 if ((bp->bio_from == NULL || 2306 bp->bio_from->geom->class != &g_part_class) && 2307 G_PART_DUMPTO(table, entry) == 0) { 2308 g_io_deliver(bp, ENODEV); 2309 printf("GEOM_PART: Partition '%s' not suitable" 2310 " for kernel dumps (wrong type?)\n", 2311 pp->name); 2312 return; 2313 } 2314 gkd = (struct g_kerneldump *)bp->bio_data; 2315 if (gkd->offset >= pp->mediasize) { 2316 g_io_deliver(bp, EIO); 2317 return; 2318 } 2319 if (gkd->offset + gkd->length > pp->mediasize) 2320 gkd->length = pp->mediasize - gkd->offset; 2321 gkd->offset += entry->gpe_offset; 2322 } 2323 break; 2324 default: 2325 g_io_deliver(bp, EOPNOTSUPP); 2326 return; 2327 } 2328 2329 bp2 = g_clone_bio(bp); 2330 if (bp2 == NULL) { 2331 g_io_deliver(bp, ENOMEM); 2332 return; 2333 } 2334 bp2->bio_done = done_func; 2335 g_io_request(bp2, cp); 2336 } 2337 2338 static void 2339 g_part_init(struct g_class *mp) 2340 { 2341 2342 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2343 } 2344 2345 static void 2346 g_part_fini(struct g_class *mp) 2347 { 2348 2349 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2350 } 2351 2352 static void 2353 g_part_unload_event(void *arg, int flag) 2354 { 2355 struct g_consumer *cp; 2356 struct g_geom *gp; 2357 struct g_provider *pp; 2358 struct g_part_scheme *scheme; 2359 struct g_part_table *table; 2360 uintptr_t *xchg; 2361 int acc, error; 2362 2363 if (flag == EV_CANCEL) 2364 return; 2365 2366 xchg = arg; 2367 error = 0; 2368 scheme = (void *)(*xchg); 2369 2370 g_topology_assert(); 2371 2372 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2373 table = gp->softc; 2374 if (table->gpt_scheme != scheme) 2375 continue; 2376 2377 acc = 0; 2378 LIST_FOREACH(pp, &gp->provider, provider) 2379 acc += pp->acr + pp->acw + pp->ace; 2380 LIST_FOREACH(cp, &gp->consumer, consumer) 2381 acc += cp->acr + cp->acw + cp->ace; 2382 2383 if (!acc) 2384 g_part_wither(gp, ENOSYS); 2385 else 2386 error = EBUSY; 2387 } 2388 2389 if (!error) 2390 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2391 2392 *xchg = error; 2393 } 2394 2395 int 2396 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2397 { 2398 struct g_part_scheme *iter; 2399 uintptr_t arg; 2400 int error; 2401 2402 error = 0; 2403 switch (type) { 2404 case MOD_LOAD: 2405 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2406 if (scheme == iter) { 2407 printf("GEOM_PART: scheme %s is already " 2408 "registered!\n", scheme->name); 2409 break; 2410 } 2411 } 2412 if (iter == NULL) { 2413 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2414 scheme_list); 2415 g_retaste(&g_part_class); 2416 } 2417 break; 2418 case MOD_UNLOAD: 2419 arg = (uintptr_t)scheme; 2420 error = g_waitfor_event(g_part_unload_event, 
&arg, M_WAITOK, 2421 NULL); 2422 if (error == 0) 2423 error = arg; 2424 break; 2425 default: 2426 error = EOPNOTSUPP; 2427 break; 2428 } 2429 2430 return (error); 2431 } 2432
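/*
 * Illustrative sketch only: a scheme module plugs into the registration
 * machinery above through g_part_modevent(). The in-tree schemes
 * generate this glue with the G_PART_SCHEME_DECLARE() macro from
 * g_part.h; the code below spells out roughly what that amounts to for
 * a hypothetical scheme "foo" (all "foo" names are placeholders, not an
 * existing scheme).
 */
#if 0
struct g_part_foo_table {
	struct g_part_table	base;	/* generic table must come first */
};

static kobj_method_t g_part_foo_methods[] = {
	/* A real scheme lists its KOBJMETHOD() entries here. */
	{ 0, 0 }
};

static struct g_part_scheme g_part_foo_scheme = {
	"foo",
	g_part_foo_methods,
	sizeof(struct g_part_foo_table),
};

static int
g_part_foo_modevent(module_t mod, int type, void *data)
{

	/*
	 * MOD_LOAD appends the scheme to g_part_schemes and retastes;
	 * MOD_UNLOAD withers idle geoms via g_part_unload_event() and
	 * fails with EBUSY while any geom using the scheme is open.
	 */
	return (g_part_modevent(mod, type, &g_part_foo_scheme));
}

static moduledata_t g_part_foo_mod = {
	"g_part_foo",
	g_part_foo_modevent,
	&g_part_foo_scheme
};
DECLARE_MODULE(g_part_foo, g_part_foo_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_DEPEND(g_part_foo, g_part, 0, 0, 0);
#endif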