/*-
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32", G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces", G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

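/*
 * Map a partition type alias to its canonical lexeme ("freebsd-ufs",
 * "efi", ...); returns NULL when the alias is not in g_part_alias_list.
 */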
const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}

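/*
 * Synthesized CHS geometries must keep the cylinder count at or below
 * 1023. For a given sector count, g_part_geometry_heads() walks the
 * candidate head counts and computes cylinders = blocks / heads / sectors;
 * e.g. 2097152 blocks (1GB in 512-byte sectors) with 64 heads and 63
 * sectors gives 520 cylinders, which fits. The candidate covering the
 * most blocks (chs = cylinders * heads * sectors) wins.
 */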
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

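/*
 * g_part_check_integrity() sanity checks a freshly read or recovered
 * table: the usable LBA range, each entry's bounds, stripe alignment
 * (advisory only) and pairwise overlap between entries. DPRINTF logs
 * the details only when booting verbosely.
 */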
#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %u bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
#undef	DPRINTF

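/*
 * Allocate a new partition entry, or reuse the one that already carries
 * the requested index. New entries are linked into gpt_entry so the list
 * stays sorted by index.
 */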
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom *
g_part_find_geom(const char *name)
{
	struct g_geom *gp;
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}

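/*
 * The g_part_parm_* helpers below each fetch one gctl request argument,
 * validate it, and report malformed values through gctl_error(). They
 * return ENOATTR when the named argument is absent from the request.
 */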
static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	const intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	const uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}

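/*
 * Probe every registered scheme against the consumer and keep the table
 * of the scheme reporting the best priority: probe values are accepted
 * when non-positive, larger is better, and 0 is a definitive match.
 */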
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

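/*
 * Handle the 'add' verb: validate the requested start/size/index against
 * the table bounds and the existing entries, then create (or revive a
 * deleted) entry and publish its provider.
 */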
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}

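/*
 * Handle the 'commit' verb: zero the scheme metadata sectors recorded in
 * gpt_smhead/gpt_smtail, write the table out through G_PART_WRITE(), and
 * drop the exclusive access obtained when the table was opened.
 */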
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

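/*
 * Handle the 'create' verb: instantiate a table object for the requested
 * scheme on the provider, replacing a leftover null-scheme table when one
 * is present from an earlier 'destroy'.
 */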
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

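/*
 * Handle the 'recover' verb: when a table was flagged corrupt at taste
 * time, let the scheme rebuild it via G_PART_RECOVER() and re-run the
 * integrity check before declaring success.
 */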
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	} else
		entry = NULL;

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */

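/*
 * Dispatch a gctl request: map the verb to a G_PART_CTL_* code, collect
 * the mandatory and optional parameters it allows, obtain exclusive
 * access for modifying verbs, run the scheme pre-check, and finally call
 * the matching g_part_ctl_*() handler. A 'C' in the flags parameter
 * requests an automatic commit.
 */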
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

out:
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}

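/*
 * Taste a provider: attach a consumer read-only, probe for a recognized
 * partitioning scheme, read the table and publish a provider for every
 * regular entry. The root mount is held off until tasting completes.
 */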
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}

/*
 * Geom methods.
 */

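/*
 * Grant access on a partition provider by passing the request down to
 * the underlying consumer. Write access on a partition also takes an
 * exclusive count below, so writes to the table and writes to a
 * partition cannot race.
 */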
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

/*-
 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
 */
static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
}

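/*
 * React to the underlying provider changing size: unless auto-resizing
 * is disabled via kern.geom.part.auto_resize, let the scheme expand the
 * table and leave the change uncommitted so the user can still undo it.
 */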

static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return (G_PART_IOCTL(table, pp, cmd, data, fflag, td));
}

static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    "  Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}
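
/*
 * The resize handler above only stages the change in the in-core table;
 * nothing is written to disk until the user commits.  A minimal example
 * of driving it from userland (illustrative device name):
 *
 *	sysctl kern.geom.part.auto_resize	# verify the knob is 1
 *	gpart commit da0			# write the resized table
 *	gpart undo da0				# or roll it back
 */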

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}

/*-
 * This start routine is only called for non-trivial requests; all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, which means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data().
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps.  Typically only swap partitions should be
			 * used.  If the request comes from the nested scheme,
			 * we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}

static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}

int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		if (error == 0)
			error = arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
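
/*
 * A partitioning scheme module is expected to funnel its module events
 * into g_part_modevent() so that the scheme gets (de)registered and
 * existing providers are retasted.  A minimal sketch, assuming a
 * hypothetical "foo" scheme (real schemes use the
 * G_PART_SCHEME_DECLARE() convenience macro from g_part.h, which
 * expands to much the same thing):
 *
 *	static int
 *	foo_modevent(module_t mod, int type, void *data)
 *	{
 *		return (g_part_modevent(mod, type, &g_part_foo_scheme));
 *	}
 */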