/*-
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

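/*
 * Table of scheme-neutral partition type aliases, mapping the lexemes
 * accepted by gpart(8) to the g_part_alias enumeration.  The table is
 * kept in alphabetical order and is sized by G_PART_ALIAS_COUNT.
 */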
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32", G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces", G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

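/*
 * Translate a partition type alias back to its lexeme; returns NULL
 * when the alias is not known.
 */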
const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}

void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

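/* Integrity-check diagnostics are only printed when booting verbose. */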
#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %u bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
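	/*
	 * A damaged table is fatal only while integrity checking is
	 * enforced; otherwise it is merely flagged as corrupt.
	 */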
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
#undef	DPRINTF

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

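/*
 * Create (or refresh) the provider for a partition entry, inheriting
 * the sector size, stripe parameters and unmapped-I/O capability of
 * the parent provider.
 */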
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	struct g_geom_alias *gap;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/*
		 * Add aliases to the geom before we create the provider so
		 * that geom_dev can taste it with all the aliases in place
		 * so all the aliased dev_t instances get created for each
		 * partition (eg foo5p7 gets created for bar5p7 when foo is
		 * an alias of bar).
		 */
		LIST_FOREACH(gap, &table->gpt_gp->aliases, ga_next) {
			sb = sbuf_new_auto();
			G_PART_FULLNAME(table, entry, sb, gap->ga_alias);
			sbuf_finish(sb);
			g_geom_add_alias(gp, sbuf_data(sb));
			sbuf_delete(sb);
		}
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom*
g_part_find_geom(const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}

static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

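/* Parse a string argument; only the "label" argument may be empty. */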
static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	const intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	const uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}

static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

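/*
 * Handle the "add" verb: validate the requested range against the
 * table limits and the existing entries, allocate or recycle an entry
 * slot, and expose the new partition as a provider.
 */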
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

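/*
 * Handle the "bootcode" verb: reject schemes that have no bootcode
 * area (ENODEV) and images that exceed it (EFBIG) before handing the
 * image to the scheme method.
 */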
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}

static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

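/*
 * Handle the "create" verb: put a partition table of the requested
 * scheme on a provider, taking over a lingering null-scheme table
 * when the provider was destroyed but not yet retasted.
 */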
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

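/*
 * Handle the "delete" verb: the partition may not be open; entries
 * that were never committed are freed outright, committed ones are
 * only marked deleted until the next commit.
 */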
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

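/*
 * Handle the "destroy" verb: replace the scheme-specific table with a
 * null-scheme table so that a subsequent commit wipes the metadata
 * sectors from the media.
 */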
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

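/*
 * Handle the "recover" verb: ask the scheme to repair a corrupt table
 * and re-run the integrity check before declaring success.
 */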
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

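/*
 * Handle the "resize" verb: the new end may not collide with another
 * entry, and an open partition may grow but must never shrink.
 */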
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	} else
		entry = NULL;

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

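/*
 * Handle the "undo" verb: discard all uncommitted changes and, when
 * the table itself was created or destroyed in this transaction,
 * reprobe the on-disk metadata from scratch.
 */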
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */

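/*
 * Dispatch a gctl request: map the verb onto a control request, parse
 * and validate its parameters, open the transaction when the request
 * modifies the table, and run the verb-specific handler.
 */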
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
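		/* Reject arguments the verb neither requires nor accepts. */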
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

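	/* On failure, roll back the implicit open performed above. */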
out:
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}

static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	struct g_geom_alias *gap;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	LIST_FOREACH(gap, &pp->geom->aliases, ga_next)
		g_geom_add_alias(gp, gap->ga_alias);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}

/*
 * Geom methods.
 */

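/*
 * Map a child's access change onto our consumer; write access also
 * counts as exclusive access so the table cannot change underneath an
 * open provider.
 */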
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

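/*
 * Export table and entry state for the configuration XML; with a NULL
 * indent the libdisk-compatible one-line form is emitted instead.
 */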
static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return (G_PART_IOCTL(table, pp, cmd, data, fflag, td));
}

static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    "  Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}

/*-
 * This start routine is only called for non-trivial requests; all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call g_io_deliver() on the
 * bio and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, which means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait(), g_getattr(), g_setattr() or g_read_data().
 */
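/*
 * Offset translation in g_part_start() below, with illustrative
 * numbers: for an entry with gpe_offset = 1048576 (1 MB into the
 * parent provider), a BIO_READ of 8192 bytes at bio_offset 4096 on
 * the partition provider is cloned and sent down at offset
 * 1048576 + 4096 = 1052672. The clone's bio_length is clipped first,
 * so that offset plus length never extends past the partition
 * provider's mediasize.
 */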
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}

static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	/*
	 * Wither all geoms of this scheme that are not in use; a single
	 * geom with open providers or consumers makes the unload fail
	 * with EBUSY.
	 */
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}

int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		if (error == 0)
			error = arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
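/*
 * Scheme modules normally reach g_part_modevent() through the module
 * event handler that G_PART_SCHEME_DECLARE() in g_part.h generates for
 * them. A minimal sketch for a hypothetical "foo" scheme (the real
 * macro also declares the moduledata and the module dependency on
 * g_part):
 *
 *	static int
 *	foo_modevent(module_t mod, int type, void *data)
 *	{
 *		return (g_part_modevent(mod, type, &foo_scheme));
 *	}
 */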