/*-
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32", G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces", G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);
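/*
 * Both knobs above are exported as loader tunables and sysctls, e.g.:
 *
 *	sysctl kern.geom.part.check_integrity=0
 *
 * With integrity checking disabled, a table that fails the checks in
 * g_part_check_integrity() below is tagged gpt_corrupt instead of being
 * rejected with EINVAL.  Likewise, kern.geom.part.auto_resize gates the
 * automatic resize handling in g_part_resize().
 */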
/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}

void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
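/*
 * Worked example for the search above, with illustrative numbers: for a
 * disk of 2097152 512-byte blocks (1GB) and 63 sectors per track, head
 * counts 1, 2, 16 and 32 all yield more than 1023 cylinders and are
 * skipped.  64 heads give C/H/S = 520/64/63, addressing 2096640 blocks;
 * 128 heads merely tie that capacity and do not replace it, and 255
 * heads terminate the loop because 130 cylinders < 255 heads.  The
 * synthesized geometry is thus 64 heads and 63 sectors.
 */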
#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %u bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
#undef	DPRINTF

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom*
g_part_find_geom(const char *name)
{
	struct g_geom *gp;
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}

static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}
static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	const intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	const uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}
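/*
 * Probe the provider with every registered scheme and keep the best
 * match.  As the logic below implements it: a G_PART_PROBE result of 0
 * is an authoritative match that ends the search, a negative result is
 * a tentative match where the value closest to zero wins, and a
 * positive result denotes an error.
 */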
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
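/*
 * Note on the commit model implemented below: the control verbs stage
 * their changes in memory (gpe_created, gpe_modified, gpe_deleted) while
 * the table is held open (gpt_opened).  Nothing reaches the media until
 * the "commit" verb calls G_PART_WRITE(); "undo" discards the staged
 * state instead.
 */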
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
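/*
 * For illustration, a typical userland sequence driving the create, add
 * and commit verbs through gpart(8) might look like (device name
 * hypothetical):
 *
 *	gpart create -s GPT da0
 *	gpart add -t freebsd-ufs da0
 *
 * gpart(8) normally passes the 'C' flag so each verb auto-commits; an
 * explicit "gpart commit da0" applies changes that were staged with
 * "-f x" instead.
 */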
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
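/*
 * Recovery applies to tables that were tagged gpt_corrupt (see
 * g_part_check_integrity() and the kern.geom.part.check_integrity
 * tunable).  While a table is corrupt, g_part_ctlreq() refuses every
 * modifying verb except "destroy" and "recover".
 */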
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
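/*
 * Resizing below refuses to shrink a partition that is open unless bit
 * 0x10 is set in kern.geom.debugflags, GEOM's traditional "allow foot
 * shooting" escape hatch.  Growing an open partition is permitted.
 */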
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	} else
		entry = NULL;

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;
		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */

static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

out:
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}

static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}
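/*
 * Note that the taste method above holds a root mount token
 * (root_mount_hold) across the probe, so that mounting the root file
 * system waits until any partitions that may contain it have been
 * published.
 */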
/*
 * Geom methods.
 */

static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}
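/*
 * Example of the mapping above: opening a partition read-write (deltas
 * 1, 1, 0) acquires (1, 1, 1) on the consumer, i.e. the underlying disk,
 * so the raw disk cannot be opened for writing by anyone else while any
 * partition on it is open for writing.
 */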
/*-
 * This start routine is only called for non-trivial requests; all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, which means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait(), g_getattr(), g_setattr() or g_read_data().
 */
static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return (G_PART_IOCTL(table, pp, cmd, data, fflag, td));
}

static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    "  Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}

static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
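		/*
		 * PART::scheme (handled above) and PART::type (below)
		 * answer BIO_GETATTR queries with strings; for an EFI
		 * system partition in a GPT, the replies would
		 * typically be "GPT" and "efi" (illustrative values).
		 */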
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps.  Typically only swap partitions should be
			 * used.  If the request comes from the nested
			 * scheme, we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}

static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}

int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		if (error == 0)
			error = arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
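/*
 * Scheme modules normally reach g_part_modevent() through the
 * G_PART_SCHEME_DECLARE() glue in g_part.h.  A minimal sketch, using
 * a hypothetical scheme "foo" (the names below are not from this
 * file):
 *
 *	static struct g_part_scheme g_part_foo_scheme = {
 *		"foo",
 *		g_part_foo_methods,
 *		sizeof(struct g_part_foo_table),
 *	};
 *	G_PART_SCHEME_DECLARE(g_part_foo);
 *
 * On MOD_LOAD the scheme is registered and all providers are
 * retasted; on MOD_UNLOAD the event handler above refuses with EBUSY
 * while any geom still using the scheme is open.
 */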