1 /*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bio.h> 32 #include <sys/endian.h> 33 #include <sys/kernel.h> 34 #include <sys/kobj.h> 35 #include <sys/limits.h> 36 #include <sys/lock.h> 37 #include <sys/malloc.h> 38 #include <sys/mutex.h> 39 #include <sys/queue.h> 40 #include <sys/sbuf.h> 41 #include <sys/sysctl.h> 42 #include <sys/systm.h> 43 #include <sys/uuid.h> 44 #include <geom/geom.h> 45 #include <geom/geom_ctl.h> 46 #include <geom/geom_int.h> 47 #include <geom/part/g_part.h> 48 49 #include "g_part_if.h" 50 51 #ifndef _PATH_DEV 52 #define _PATH_DEV "/dev/" 53 #endif 54 55 static kobj_method_t g_part_null_methods[] = { 56 { 0, 0 } 57 }; 58 59 static struct g_part_scheme g_part_null_scheme = { 60 "(none)", 61 g_part_null_methods, 62 sizeof(struct g_part_table), 63 }; 64 65 TAILQ_HEAD(, g_part_scheme) g_part_schemes = 66 TAILQ_HEAD_INITIALIZER(g_part_schemes); 67 68 struct g_part_alias_list { 69 const char *lexeme; 70 enum g_part_alias alias; 71 } g_part_alias_list[G_PART_ALIAS_COUNT] = { 72 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 73 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE }, 74 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 75 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 76 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 77 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 78 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 79 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 81 { "ebr", G_PART_ALIAS_EBR }, 82 { "efi", G_PART_ALIAS_EFI }, 83 { "fat16", G_PART_ALIAS_MS_FAT16 }, 84 { "fat32", G_PART_ALIAS_MS_FAT32 }, 85 { "freebsd", G_PART_ALIAS_FREEBSD }, 86 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 87 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS }, 88 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 89 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 90 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 91 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 92 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 93 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 94 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 95 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 96 { "mbr", G_PART_ALIAS_MBR }, 97 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 98 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 99 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 100 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 101 { "ntfs", G_PART_ALIAS_MS_NTFS }, 102 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 103 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 104 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 105 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 106 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 107 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 108 { "vmware-vmfs", G_PART_ALIAS_VMFS }, 109 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG }, 110 { "vmware-reserved", G_PART_ALIAS_VMRESERVED }, 111 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR }, 112 { "dragonfly-label32", G_PART_ALIAS_DFBSD }, 113 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 }, 114 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP }, 115 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS }, 116 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM }, 117 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD }, 118 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY }, 119 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER }, 120 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 }, 121 { "prep-boot", G_PART_ALIAS_PREP_BOOT }, 122 }; 123 124 SYSCTL_DECL(_kern_geom); 125 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, 126 "GEOM_PART stuff"); 127 static u_int check_integrity = 1; 128 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, 129 CTLFLAG_RWTUN, &check_integrity, 1, 130 "Enable integrity checking"); 131 132 /* 133 * The GEOM partitioning class. 134 */ 135 static g_ctl_req_t g_part_ctlreq; 136 static g_ctl_destroy_geom_t g_part_destroy_geom; 137 static g_fini_t g_part_fini; 138 static g_init_t g_part_init; 139 static g_taste_t g_part_taste; 140 141 static g_access_t g_part_access; 142 static g_dumpconf_t g_part_dumpconf; 143 static g_orphan_t g_part_orphan; 144 static g_spoiled_t g_part_spoiled; 145 static g_start_t g_part_start; 146 static g_resize_t g_part_resize; 147 static g_ioctl_t g_part_ioctl; 148 149 static struct g_class g_part_class = { 150 .name = "PART", 151 .version = G_VERSION, 152 /* Class methods. */ 153 .ctlreq = g_part_ctlreq, 154 .destroy_geom = g_part_destroy_geom, 155 .fini = g_part_fini, 156 .init = g_part_init, 157 .taste = g_part_taste, 158 /* Geom methods. */ 159 .access = g_part_access, 160 .dumpconf = g_part_dumpconf, 161 .orphan = g_part_orphan, 162 .spoiled = g_part_spoiled, 163 .start = g_part_start, 164 .resize = g_part_resize, 165 .ioctl = g_part_ioctl, 166 }; 167 168 DECLARE_GEOM_CLASS(g_part_class, g_part); 169 MODULE_VERSION(g_part, 0); 170 171 /* 172 * Support functions. 173 */ 174 175 static void g_part_wither(struct g_geom *, int); 176 177 const char * 178 g_part_alias_name(enum g_part_alias alias) 179 { 180 int i; 181 182 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 183 if (g_part_alias_list[i].alias != alias) 184 continue; 185 return (g_part_alias_list[i].lexeme); 186 } 187 188 return (NULL); 189 } 190 191 void 192 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 193 u_int *bestheads) 194 { 195 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 196 off_t chs, cylinders; 197 u_int heads; 198 int idx; 199 200 *bestchs = 0; 201 *bestheads = 0; 202 for (idx = 0; candidate_heads[idx] != 0; idx++) { 203 heads = candidate_heads[idx]; 204 cylinders = blocks / heads / sectors; 205 if (cylinders < heads || cylinders < sectors) 206 break; 207 if (cylinders > 1023) 208 continue; 209 chs = cylinders * heads * sectors; 210 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 211 *bestchs = chs; 212 *bestheads = heads; 213 } 214 } 215 } 216 217 static void 218 g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 219 off_t blocks) 220 { 221 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 222 off_t chs, bestchs; 223 u_int heads, sectors; 224 int idx; 225 226 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 227 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 228 table->gpt_fixgeom = 0; 229 table->gpt_heads = 0; 230 table->gpt_sectors = 0; 231 bestchs = 0; 232 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 233 sectors = candidate_sectors[idx]; 234 g_part_geometry_heads(blocks, sectors, &chs, &heads); 235 if (chs == 0) 236 continue; 237 /* 238 * Prefer a geometry with sectors > 1, but only if 239 * it doesn't bump down the number of heads to 1. 240 */ 241 if (chs > bestchs || (chs == bestchs && heads > 1 && 242 table->gpt_sectors == 1)) { 243 bestchs = chs; 244 table->gpt_heads = heads; 245 table->gpt_sectors = sectors; 246 } 247 } 248 /* 249 * If we didn't find a geometry at all, then the disk is 250 * too big. This means we can use the maximum number of 251 * heads and sectors. 252 */ 253 if (bestchs == 0) { 254 table->gpt_heads = 255; 255 table->gpt_sectors = 63; 256 } 257 } else { 258 table->gpt_fixgeom = 1; 259 table->gpt_heads = heads; 260 table->gpt_sectors = sectors; 261 } 262 } 263 264 #define DPRINTF(...) if (bootverbose) { \ 265 printf("GEOM_PART: " __VA_ARGS__); \ 266 } 267 268 static int 269 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 270 { 271 struct g_part_entry *e1, *e2; 272 struct g_provider *pp; 273 off_t offset; 274 int failed; 275 276 failed = 0; 277 pp = cp->provider; 278 if (table->gpt_last < table->gpt_first) { 279 DPRINTF("last LBA is below first LBA: %jd < %jd\n", 280 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first); 281 failed++; 282 } 283 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) { 284 DPRINTF("last LBA extends beyond mediasize: " 285 "%jd > %jd\n", (intmax_t)table->gpt_last, 286 (intmax_t)pp->mediasize / pp->sectorsize - 1); 287 failed++; 288 } 289 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 290 if (e1->gpe_deleted || e1->gpe_internal) 291 continue; 292 if (e1->gpe_start < table->gpt_first) { 293 DPRINTF("partition %d has start offset below first " 294 "LBA: %jd < %jd\n", e1->gpe_index, 295 (intmax_t)e1->gpe_start, 296 (intmax_t)table->gpt_first); 297 failed++; 298 } 299 if (e1->gpe_start > table->gpt_last) { 300 DPRINTF("partition %d has start offset beyond last " 301 "LBA: %jd > %jd\n", e1->gpe_index, 302 (intmax_t)e1->gpe_start, 303 (intmax_t)table->gpt_last); 304 failed++; 305 } 306 if (e1->gpe_end < e1->gpe_start) { 307 DPRINTF("partition %d has end offset below start " 308 "offset: %jd < %jd\n", e1->gpe_index, 309 (intmax_t)e1->gpe_end, 310 (intmax_t)e1->gpe_start); 311 failed++; 312 } 313 if (e1->gpe_end > table->gpt_last) { 314 DPRINTF("partition %d has end offset beyond last " 315 "LBA: %jd > %jd\n", e1->gpe_index, 316 (intmax_t)e1->gpe_end, 317 (intmax_t)table->gpt_last); 318 failed++; 319 } 320 if (pp->stripesize > 0) { 321 offset = e1->gpe_start * pp->sectorsize; 322 if (e1->gpe_offset > offset) 323 offset = e1->gpe_offset; 324 if ((offset + pp->stripeoffset) % pp->stripesize) { 325 DPRINTF("partition %d on (%s, %s) is not " 326 "aligned on %u bytes\n", e1->gpe_index, 327 pp->name, table->gpt_scheme->name, 328 pp->stripesize); 329 /* Don't treat this as a critical failure */ 330 } 331 } 332 e2 = e1; 333 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 334 if (e2->gpe_deleted || e2->gpe_internal) 335 continue; 336 if (e1->gpe_start >= e2->gpe_start && 337 e1->gpe_start <= e2->gpe_end) { 338 DPRINTF("partition %d has start offset inside " 339 "partition %d: start[%d] %jd >= start[%d] " 340 "%jd <= end[%d] %jd\n", 341 e1->gpe_index, e2->gpe_index, 342 e2->gpe_index, (intmax_t)e2->gpe_start, 343 e1->gpe_index, (intmax_t)e1->gpe_start, 344 e2->gpe_index, (intmax_t)e2->gpe_end); 345 failed++; 346 } 347 if (e1->gpe_end >= e2->gpe_start && 348 e1->gpe_end <= e2->gpe_end) { 349 DPRINTF("partition %d has end offset inside " 350 "partition %d: start[%d] %jd >= end[%d] " 351 "%jd <= end[%d] %jd\n", 352 e1->gpe_index, e2->gpe_index, 353 e2->gpe_index, (intmax_t)e2->gpe_start, 354 e1->gpe_index, (intmax_t)e1->gpe_end, 355 e2->gpe_index, (intmax_t)e2->gpe_end); 356 failed++; 357 } 358 if (e1->gpe_start < e2->gpe_start && 359 e1->gpe_end > e2->gpe_end) { 360 DPRINTF("partition %d contains partition %d: " 361 "start[%d] %jd > start[%d] %jd, end[%d] " 362 "%jd < end[%d] %jd\n", 363 e1->gpe_index, e2->gpe_index, 364 e1->gpe_index, (intmax_t)e1->gpe_start, 365 e2->gpe_index, (intmax_t)e2->gpe_start, 366 e2->gpe_index, (intmax_t)e2->gpe_end, 367 e1->gpe_index, (intmax_t)e1->gpe_end); 368 failed++; 369 } 370 } 371 } 372 if (failed != 0) { 373 printf("GEOM_PART: integrity check failed (%s, %s)\n", 374 pp->name, table->gpt_scheme->name); 375 if (check_integrity != 0) 376 return (EINVAL); 377 table->gpt_corrupt = 1; 378 } 379 return (0); 380 } 381 #undef DPRINTF 382 383 struct g_part_entry * 384 g_part_new_entry(struct g_part_table *table, int index, quad_t start, 385 quad_t end) 386 { 387 struct g_part_entry *entry, *last; 388 389 last = NULL; 390 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 391 if (entry->gpe_index == index) 392 break; 393 if (entry->gpe_index > index) { 394 entry = NULL; 395 break; 396 } 397 last = entry; 398 } 399 if (entry == NULL) { 400 entry = g_malloc(table->gpt_scheme->gps_entrysz, 401 M_WAITOK | M_ZERO); 402 entry->gpe_index = index; 403 if (last == NULL) 404 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 405 else 406 LIST_INSERT_AFTER(last, entry, gpe_entry); 407 } else 408 entry->gpe_offset = 0; 409 entry->gpe_start = start; 410 entry->gpe_end = end; 411 return (entry); 412 } 413 414 static void 415 g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 416 struct g_part_entry *entry) 417 { 418 struct g_consumer *cp; 419 struct g_provider *pp; 420 struct sbuf *sb; 421 off_t offset; 422 423 cp = LIST_FIRST(&gp->consumer); 424 pp = cp->provider; 425 426 offset = entry->gpe_start * pp->sectorsize; 427 if (entry->gpe_offset < offset) 428 entry->gpe_offset = offset; 429 430 if (entry->gpe_pp == NULL) { 431 sb = sbuf_new_auto(); 432 G_PART_FULLNAME(table, entry, sb, gp->name); 433 sbuf_finish(sb); 434 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 435 sbuf_delete(sb); 436 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 437 entry->gpe_pp->private = entry; /* Close the circle. */ 438 } 439 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 440 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 441 pp->sectorsize; 442 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 443 entry->gpe_pp->sectorsize = pp->sectorsize; 444 entry->gpe_pp->stripesize = pp->stripesize; 445 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 446 if (pp->stripesize > 0) 447 entry->gpe_pp->stripeoffset %= pp->stripesize; 448 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED; 449 g_error_provider(entry->gpe_pp, 0); 450 } 451 452 static struct g_geom* 453 g_part_find_geom(const char *name) 454 { 455 struct g_geom *gp; 456 LIST_FOREACH(gp, &g_part_class.geom, geom) { 457 if ((gp->flags & G_GEOM_WITHER) == 0 && 458 strcmp(name, gp->name) == 0) 459 break; 460 } 461 return (gp); 462 } 463 464 static int 465 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 466 { 467 struct g_geom *gp; 468 const char *gname; 469 470 gname = gctl_get_asciiparam(req, name); 471 if (gname == NULL) 472 return (ENOATTR); 473 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 474 gname += sizeof(_PATH_DEV) - 1; 475 gp = g_part_find_geom(gname); 476 if (gp == NULL) { 477 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 478 return (EINVAL); 479 } 480 *v = gp; 481 return (0); 482 } 483 484 static int 485 g_part_parm_provider(struct gctl_req *req, const char *name, 486 struct g_provider **v) 487 { 488 struct g_provider *pp; 489 const char *pname; 490 491 pname = gctl_get_asciiparam(req, name); 492 if (pname == NULL) 493 return (ENOATTR); 494 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 495 pname += sizeof(_PATH_DEV) - 1; 496 pp = g_provider_by_name(pname); 497 if (pp == NULL) { 498 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 499 return (EINVAL); 500 } 501 *v = pp; 502 return (0); 503 } 504 505 static int 506 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 507 { 508 const char *p; 509 char *x; 510 quad_t q; 511 512 p = gctl_get_asciiparam(req, name); 513 if (p == NULL) 514 return (ENOATTR); 515 q = strtoq(p, &x, 0); 516 if (*x != '\0' || q < 0) { 517 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 518 return (EINVAL); 519 } 520 *v = q; 521 return (0); 522 } 523 524 static int 525 g_part_parm_scheme(struct gctl_req *req, const char *name, 526 struct g_part_scheme **v) 527 { 528 struct g_part_scheme *s; 529 const char *p; 530 531 p = gctl_get_asciiparam(req, name); 532 if (p == NULL) 533 return (ENOATTR); 534 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 535 if (s == &g_part_null_scheme) 536 continue; 537 if (!strcasecmp(s->name, p)) 538 break; 539 } 540 if (s == NULL) { 541 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 542 return (EINVAL); 543 } 544 *v = s; 545 return (0); 546 } 547 548 static int 549 g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 550 { 551 const char *p; 552 553 p = gctl_get_asciiparam(req, name); 554 if (p == NULL) 555 return (ENOATTR); 556 /* An empty label is always valid. */ 557 if (strcmp(name, "label") != 0 && p[0] == '\0') { 558 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 559 return (EINVAL); 560 } 561 *v = p; 562 return (0); 563 } 564 565 static int 566 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 567 { 568 const intmax_t *p; 569 int size; 570 571 p = gctl_get_param(req, name, &size); 572 if (p == NULL) 573 return (ENOATTR); 574 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 575 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 576 return (EINVAL); 577 } 578 *v = (u_int)*p; 579 return (0); 580 } 581 582 static int 583 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 584 { 585 const uint32_t *p; 586 int size; 587 588 p = gctl_get_param(req, name, &size); 589 if (p == NULL) 590 return (ENOATTR); 591 if (size != sizeof(*p) || *p > INT_MAX) { 592 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 593 return (EINVAL); 594 } 595 *v = (u_int)*p; 596 return (0); 597 } 598 599 static int 600 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 601 unsigned int *s) 602 { 603 const void *p; 604 int size; 605 606 p = gctl_get_param(req, name, &size); 607 if (p == NULL) 608 return (ENOATTR); 609 *v = p; 610 *s = size; 611 return (0); 612 } 613 614 static int 615 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 616 { 617 struct g_part_scheme *iter, *scheme; 618 struct g_part_table *table; 619 int pri, probe; 620 621 table = gp->softc; 622 scheme = (table != NULL) ? table->gpt_scheme : NULL; 623 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 624 if (pri == 0) 625 goto done; 626 if (pri > 0) { /* error */ 627 scheme = NULL; 628 pri = INT_MIN; 629 } 630 631 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 632 if (iter == &g_part_null_scheme) 633 continue; 634 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 635 M_WAITOK); 636 table->gpt_gp = gp; 637 table->gpt_scheme = iter; 638 table->gpt_depth = depth; 639 probe = G_PART_PROBE(table, cp); 640 if (probe <= 0 && probe > pri) { 641 pri = probe; 642 scheme = iter; 643 if (gp->softc != NULL) 644 kobj_delete((kobj_t)gp->softc, M_GEOM); 645 gp->softc = table; 646 if (pri == 0) 647 goto done; 648 } else 649 kobj_delete((kobj_t)table, M_GEOM); 650 } 651 652 done: 653 return ((scheme == NULL) ? ENXIO : 0); 654 } 655 656 /* 657 * Control request functions. 658 */ 659 660 static int 661 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 662 { 663 struct g_geom *gp; 664 struct g_provider *pp; 665 struct g_part_entry *delent, *last, *entry; 666 struct g_part_table *table; 667 struct sbuf *sb; 668 quad_t end; 669 unsigned int index; 670 int error; 671 672 gp = gpp->gpp_geom; 673 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 674 g_topology_assert(); 675 676 pp = LIST_FIRST(&gp->consumer)->provider; 677 table = gp->softc; 678 end = gpp->gpp_start + gpp->gpp_size - 1; 679 680 if (gpp->gpp_start < table->gpt_first || 681 gpp->gpp_start > table->gpt_last) { 682 gctl_error(req, "%d start '%jd'", EINVAL, 683 (intmax_t)gpp->gpp_start); 684 return (EINVAL); 685 } 686 if (end < gpp->gpp_start || end > table->gpt_last) { 687 gctl_error(req, "%d size '%jd'", EINVAL, 688 (intmax_t)gpp->gpp_size); 689 return (EINVAL); 690 } 691 if (gpp->gpp_index > table->gpt_entries) { 692 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 693 return (EINVAL); 694 } 695 696 delent = last = NULL; 697 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 698 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 699 if (entry->gpe_deleted) { 700 if (entry->gpe_index == index) 701 delent = entry; 702 continue; 703 } 704 if (entry->gpe_index == index) 705 index = entry->gpe_index + 1; 706 if (entry->gpe_index < index) 707 last = entry; 708 if (entry->gpe_internal) 709 continue; 710 if (gpp->gpp_start >= entry->gpe_start && 711 gpp->gpp_start <= entry->gpe_end) { 712 gctl_error(req, "%d start '%jd'", ENOSPC, 713 (intmax_t)gpp->gpp_start); 714 return (ENOSPC); 715 } 716 if (end >= entry->gpe_start && end <= entry->gpe_end) { 717 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 718 return (ENOSPC); 719 } 720 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 721 gctl_error(req, "%d size '%jd'", ENOSPC, 722 (intmax_t)gpp->gpp_size); 723 return (ENOSPC); 724 } 725 } 726 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 727 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 728 return (EEXIST); 729 } 730 if (index > table->gpt_entries) { 731 gctl_error(req, "%d index '%d'", ENOSPC, index); 732 return (ENOSPC); 733 } 734 735 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 736 M_WAITOK | M_ZERO) : delent; 737 entry->gpe_index = index; 738 entry->gpe_start = gpp->gpp_start; 739 entry->gpe_end = end; 740 error = G_PART_ADD(table, entry, gpp); 741 if (error) { 742 gctl_error(req, "%d", error); 743 if (delent == NULL) 744 g_free(entry); 745 return (error); 746 } 747 if (delent == NULL) { 748 if (last == NULL) 749 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 750 else 751 LIST_INSERT_AFTER(last, entry, gpe_entry); 752 entry->gpe_created = 1; 753 } else { 754 entry->gpe_deleted = 0; 755 entry->gpe_modified = 1; 756 } 757 g_part_new_provider(gp, table, entry); 758 759 /* Provide feedback if so requested. */ 760 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 761 sb = sbuf_new_auto(); 762 G_PART_FULLNAME(table, entry, sb, gp->name); 763 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0) 764 sbuf_printf(sb, " added, but partition is not " 765 "aligned on %u bytes\n", pp->stripesize); 766 else 767 sbuf_cat(sb, " added\n"); 768 sbuf_finish(sb); 769 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 770 sbuf_delete(sb); 771 } 772 return (0); 773 } 774 775 static int 776 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 777 { 778 struct g_geom *gp; 779 struct g_part_table *table; 780 struct sbuf *sb; 781 int error, sz; 782 783 gp = gpp->gpp_geom; 784 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 785 g_topology_assert(); 786 787 table = gp->softc; 788 sz = table->gpt_scheme->gps_bootcodesz; 789 if (sz == 0) { 790 error = ENODEV; 791 goto fail; 792 } 793 if (gpp->gpp_codesize > sz) { 794 error = EFBIG; 795 goto fail; 796 } 797 798 error = G_PART_BOOTCODE(table, gpp); 799 if (error) 800 goto fail; 801 802 /* Provide feedback if so requested. */ 803 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 804 sb = sbuf_new_auto(); 805 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 806 sbuf_finish(sb); 807 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 808 sbuf_delete(sb); 809 } 810 return (0); 811 812 fail: 813 gctl_error(req, "%d", error); 814 return (error); 815 } 816 817 static int 818 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 819 { 820 struct g_consumer *cp; 821 struct g_geom *gp; 822 struct g_provider *pp; 823 struct g_part_entry *entry, *tmp; 824 struct g_part_table *table; 825 char *buf; 826 int error, i; 827 828 gp = gpp->gpp_geom; 829 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 830 g_topology_assert(); 831 832 table = gp->softc; 833 if (!table->gpt_opened) { 834 gctl_error(req, "%d", EPERM); 835 return (EPERM); 836 } 837 838 g_topology_unlock(); 839 840 cp = LIST_FIRST(&gp->consumer); 841 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 842 pp = cp->provider; 843 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 844 while (table->gpt_smhead != 0) { 845 i = ffs(table->gpt_smhead) - 1; 846 error = g_write_data(cp, i * pp->sectorsize, buf, 847 pp->sectorsize); 848 if (error) { 849 g_free(buf); 850 goto fail; 851 } 852 table->gpt_smhead &= ~(1 << i); 853 } 854 while (table->gpt_smtail != 0) { 855 i = ffs(table->gpt_smtail) - 1; 856 error = g_write_data(cp, pp->mediasize - (i + 1) * 857 pp->sectorsize, buf, pp->sectorsize); 858 if (error) { 859 g_free(buf); 860 goto fail; 861 } 862 table->gpt_smtail &= ~(1 << i); 863 } 864 g_free(buf); 865 } 866 867 if (table->gpt_scheme == &g_part_null_scheme) { 868 g_topology_lock(); 869 g_access(cp, -1, -1, -1); 870 g_part_wither(gp, ENXIO); 871 return (0); 872 } 873 874 error = G_PART_WRITE(table, cp); 875 if (error) 876 goto fail; 877 878 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 879 if (!entry->gpe_deleted) { 880 entry->gpe_created = 0; 881 entry->gpe_modified = 0; 882 continue; 883 } 884 LIST_REMOVE(entry, gpe_entry); 885 g_free(entry); 886 } 887 table->gpt_created = 0; 888 table->gpt_opened = 0; 889 890 g_topology_lock(); 891 g_access(cp, -1, -1, -1); 892 return (0); 893 894 fail: 895 g_topology_lock(); 896 gctl_error(req, "%d", error); 897 return (error); 898 } 899 900 static int 901 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 902 { 903 struct g_consumer *cp; 904 struct g_geom *gp; 905 struct g_provider *pp; 906 struct g_part_scheme *scheme; 907 struct g_part_table *null, *table; 908 struct sbuf *sb; 909 int attr, error; 910 911 pp = gpp->gpp_provider; 912 scheme = gpp->gpp_scheme; 913 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 914 g_topology_assert(); 915 916 /* Check that there isn't already a g_part geom on the provider. */ 917 gp = g_part_find_geom(pp->name); 918 if (gp != NULL) { 919 null = gp->softc; 920 if (null->gpt_scheme != &g_part_null_scheme) { 921 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 922 return (EEXIST); 923 } 924 } else 925 null = NULL; 926 927 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 928 (gpp->gpp_entries < scheme->gps_minent || 929 gpp->gpp_entries > scheme->gps_maxent)) { 930 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 931 return (EINVAL); 932 } 933 934 if (null == NULL) 935 gp = g_new_geomf(&g_part_class, "%s", pp->name); 936 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 937 M_WAITOK); 938 table = gp->softc; 939 table->gpt_gp = gp; 940 table->gpt_scheme = gpp->gpp_scheme; 941 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 942 gpp->gpp_entries : scheme->gps_minent; 943 LIST_INIT(&table->gpt_entry); 944 if (null == NULL) { 945 cp = g_new_consumer(gp); 946 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 947 error = g_attach(cp, pp); 948 if (error == 0) 949 error = g_access(cp, 1, 1, 1); 950 if (error != 0) { 951 g_part_wither(gp, error); 952 gctl_error(req, "%d geom '%s'", error, pp->name); 953 return (error); 954 } 955 table->gpt_opened = 1; 956 } else { 957 cp = LIST_FIRST(&gp->consumer); 958 table->gpt_opened = null->gpt_opened; 959 table->gpt_smhead = null->gpt_smhead; 960 table->gpt_smtail = null->gpt_smtail; 961 } 962 963 g_topology_unlock(); 964 965 /* Make sure the provider has media. */ 966 if (pp->mediasize == 0 || pp->sectorsize == 0) { 967 error = ENODEV; 968 goto fail; 969 } 970 971 /* Make sure we can nest and if so, determine our depth. */ 972 error = g_getattr("PART::isleaf", cp, &attr); 973 if (!error && attr) { 974 error = ENODEV; 975 goto fail; 976 } 977 error = g_getattr("PART::depth", cp, &attr); 978 table->gpt_depth = (!error) ? attr + 1 : 0; 979 980 /* 981 * Synthesize a disk geometry. Some partitioning schemes 982 * depend on it and since some file systems need it even 983 * when the partitition scheme doesn't, we do it here in 984 * scheme-independent code. 985 */ 986 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 987 988 error = G_PART_CREATE(table, gpp); 989 if (error) 990 goto fail; 991 992 g_topology_lock(); 993 994 table->gpt_created = 1; 995 if (null != NULL) 996 kobj_delete((kobj_t)null, M_GEOM); 997 998 /* 999 * Support automatic commit by filling in the gpp_geom 1000 * parameter. 1001 */ 1002 gpp->gpp_parms |= G_PART_PARM_GEOM; 1003 gpp->gpp_geom = gp; 1004 1005 /* Provide feedback if so requested. */ 1006 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1007 sb = sbuf_new_auto(); 1008 sbuf_printf(sb, "%s created\n", gp->name); 1009 sbuf_finish(sb); 1010 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1011 sbuf_delete(sb); 1012 } 1013 return (0); 1014 1015 fail: 1016 g_topology_lock(); 1017 if (null == NULL) { 1018 g_access(cp, -1, -1, -1); 1019 g_part_wither(gp, error); 1020 } else { 1021 kobj_delete((kobj_t)gp->softc, M_GEOM); 1022 gp->softc = null; 1023 } 1024 gctl_error(req, "%d provider", error); 1025 return (error); 1026 } 1027 1028 static int 1029 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 1030 { 1031 struct g_geom *gp; 1032 struct g_provider *pp; 1033 struct g_part_entry *entry; 1034 struct g_part_table *table; 1035 struct sbuf *sb; 1036 1037 gp = gpp->gpp_geom; 1038 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1039 g_topology_assert(); 1040 1041 table = gp->softc; 1042 1043 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1044 if (entry->gpe_deleted || entry->gpe_internal) 1045 continue; 1046 if (entry->gpe_index == gpp->gpp_index) 1047 break; 1048 } 1049 if (entry == NULL) { 1050 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1051 return (ENOENT); 1052 } 1053 1054 pp = entry->gpe_pp; 1055 if (pp != NULL) { 1056 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 1057 gctl_error(req, "%d", EBUSY); 1058 return (EBUSY); 1059 } 1060 1061 pp->private = NULL; 1062 entry->gpe_pp = NULL; 1063 } 1064 1065 if (pp != NULL) 1066 g_wither_provider(pp, ENXIO); 1067 1068 /* Provide feedback if so requested. */ 1069 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1070 sb = sbuf_new_auto(); 1071 G_PART_FULLNAME(table, entry, sb, gp->name); 1072 sbuf_cat(sb, " deleted\n"); 1073 sbuf_finish(sb); 1074 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1075 sbuf_delete(sb); 1076 } 1077 1078 if (entry->gpe_created) { 1079 LIST_REMOVE(entry, gpe_entry); 1080 g_free(entry); 1081 } else { 1082 entry->gpe_modified = 0; 1083 entry->gpe_deleted = 1; 1084 } 1085 return (0); 1086 } 1087 1088 static int 1089 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1090 { 1091 struct g_consumer *cp; 1092 struct g_geom *gp; 1093 struct g_provider *pp; 1094 struct g_part_entry *entry, *tmp; 1095 struct g_part_table *null, *table; 1096 struct sbuf *sb; 1097 int error; 1098 1099 gp = gpp->gpp_geom; 1100 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1101 g_topology_assert(); 1102 1103 table = gp->softc; 1104 /* Check for busy providers. */ 1105 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1106 if (entry->gpe_deleted || entry->gpe_internal) 1107 continue; 1108 if (gpp->gpp_force) { 1109 pp = entry->gpe_pp; 1110 if (pp == NULL) 1111 continue; 1112 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1113 continue; 1114 } 1115 gctl_error(req, "%d", EBUSY); 1116 return (EBUSY); 1117 } 1118 1119 if (gpp->gpp_force) { 1120 /* Destroy all providers. */ 1121 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1122 pp = entry->gpe_pp; 1123 if (pp != NULL) { 1124 pp->private = NULL; 1125 g_wither_provider(pp, ENXIO); 1126 } 1127 LIST_REMOVE(entry, gpe_entry); 1128 g_free(entry); 1129 } 1130 } 1131 1132 error = G_PART_DESTROY(table, gpp); 1133 if (error) { 1134 gctl_error(req, "%d", error); 1135 return (error); 1136 } 1137 1138 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1139 M_WAITOK); 1140 null = gp->softc; 1141 null->gpt_gp = gp; 1142 null->gpt_scheme = &g_part_null_scheme; 1143 LIST_INIT(&null->gpt_entry); 1144 1145 cp = LIST_FIRST(&gp->consumer); 1146 pp = cp->provider; 1147 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1148 1149 null->gpt_depth = table->gpt_depth; 1150 null->gpt_opened = table->gpt_opened; 1151 null->gpt_smhead = table->gpt_smhead; 1152 null->gpt_smtail = table->gpt_smtail; 1153 1154 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1155 LIST_REMOVE(entry, gpe_entry); 1156 g_free(entry); 1157 } 1158 kobj_delete((kobj_t)table, M_GEOM); 1159 1160 /* Provide feedback if so requested. */ 1161 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1162 sb = sbuf_new_auto(); 1163 sbuf_printf(sb, "%s destroyed\n", gp->name); 1164 sbuf_finish(sb); 1165 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1166 sbuf_delete(sb); 1167 } 1168 return (0); 1169 } 1170 1171 static int 1172 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1173 { 1174 struct g_geom *gp; 1175 struct g_part_entry *entry; 1176 struct g_part_table *table; 1177 struct sbuf *sb; 1178 int error; 1179 1180 gp = gpp->gpp_geom; 1181 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1182 g_topology_assert(); 1183 1184 table = gp->softc; 1185 1186 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1187 if (entry->gpe_deleted || entry->gpe_internal) 1188 continue; 1189 if (entry->gpe_index == gpp->gpp_index) 1190 break; 1191 } 1192 if (entry == NULL) { 1193 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1194 return (ENOENT); 1195 } 1196 1197 error = G_PART_MODIFY(table, entry, gpp); 1198 if (error) { 1199 gctl_error(req, "%d", error); 1200 return (error); 1201 } 1202 1203 if (!entry->gpe_created) 1204 entry->gpe_modified = 1; 1205 1206 /* Provide feedback if so requested. */ 1207 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1208 sb = sbuf_new_auto(); 1209 G_PART_FULLNAME(table, entry, sb, gp->name); 1210 sbuf_cat(sb, " modified\n"); 1211 sbuf_finish(sb); 1212 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1213 sbuf_delete(sb); 1214 } 1215 return (0); 1216 } 1217 1218 static int 1219 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1220 { 1221 gctl_error(req, "%d verb 'move'", ENOSYS); 1222 return (ENOSYS); 1223 } 1224 1225 static int 1226 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1227 { 1228 struct g_part_table *table; 1229 struct g_geom *gp; 1230 struct sbuf *sb; 1231 int error, recovered; 1232 1233 gp = gpp->gpp_geom; 1234 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1235 g_topology_assert(); 1236 table = gp->softc; 1237 error = recovered = 0; 1238 1239 if (table->gpt_corrupt) { 1240 error = G_PART_RECOVER(table); 1241 if (error == 0) 1242 error = g_part_check_integrity(table, 1243 LIST_FIRST(&gp->consumer)); 1244 if (error) { 1245 gctl_error(req, "%d recovering '%s' failed", 1246 error, gp->name); 1247 return (error); 1248 } 1249 recovered = 1; 1250 } 1251 /* Provide feedback if so requested. */ 1252 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1253 sb = sbuf_new_auto(); 1254 if (recovered) 1255 sbuf_printf(sb, "%s recovered\n", gp->name); 1256 else 1257 sbuf_printf(sb, "%s recovering is not needed\n", 1258 gp->name); 1259 sbuf_finish(sb); 1260 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1261 sbuf_delete(sb); 1262 } 1263 return (0); 1264 } 1265 1266 static int 1267 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1268 { 1269 struct g_geom *gp; 1270 struct g_provider *pp; 1271 struct g_part_entry *pe, *entry; 1272 struct g_part_table *table; 1273 struct sbuf *sb; 1274 quad_t end; 1275 int error; 1276 off_t mediasize; 1277 1278 gp = gpp->gpp_geom; 1279 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1280 g_topology_assert(); 1281 table = gp->softc; 1282 1283 /* check gpp_index */ 1284 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1285 if (entry->gpe_deleted || entry->gpe_internal) 1286 continue; 1287 if (entry->gpe_index == gpp->gpp_index) 1288 break; 1289 } 1290 if (entry == NULL) { 1291 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1292 return (ENOENT); 1293 } 1294 1295 /* check gpp_size */ 1296 end = entry->gpe_start + gpp->gpp_size - 1; 1297 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1298 gctl_error(req, "%d size '%jd'", EINVAL, 1299 (intmax_t)gpp->gpp_size); 1300 return (EINVAL); 1301 } 1302 1303 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1304 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1305 continue; 1306 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1307 gctl_error(req, "%d end '%jd'", ENOSPC, 1308 (intmax_t)end); 1309 return (ENOSPC); 1310 } 1311 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1312 gctl_error(req, "%d size '%jd'", ENOSPC, 1313 (intmax_t)gpp->gpp_size); 1314 return (ENOSPC); 1315 } 1316 } 1317 1318 pp = entry->gpe_pp; 1319 if ((g_debugflags & 16) == 0 && 1320 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1321 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) { 1322 /* Deny shrinking of an opened partition. */ 1323 gctl_error(req, "%d", EBUSY); 1324 return (EBUSY); 1325 } 1326 } 1327 1328 error = G_PART_RESIZE(table, entry, gpp); 1329 if (error) { 1330 gctl_error(req, "%d%s", error, error != EBUSY ? "": 1331 " resizing will lead to unexpected shrinking" 1332 " due to alignment"); 1333 return (error); 1334 } 1335 1336 if (!entry->gpe_created) 1337 entry->gpe_modified = 1; 1338 1339 /* update mediasize of changed provider */ 1340 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1341 pp->sectorsize; 1342 g_resize_provider(pp, mediasize); 1343 1344 /* Provide feedback if so requested. */ 1345 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1346 sb = sbuf_new_auto(); 1347 G_PART_FULLNAME(table, entry, sb, gp->name); 1348 sbuf_cat(sb, " resized\n"); 1349 sbuf_finish(sb); 1350 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1351 sbuf_delete(sb); 1352 } 1353 return (0); 1354 } 1355 1356 static int 1357 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1358 unsigned int set) 1359 { 1360 struct g_geom *gp; 1361 struct g_part_entry *entry; 1362 struct g_part_table *table; 1363 struct sbuf *sb; 1364 int error; 1365 1366 gp = gpp->gpp_geom; 1367 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1368 g_topology_assert(); 1369 1370 table = gp->softc; 1371 1372 if (gpp->gpp_parms & G_PART_PARM_INDEX) { 1373 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1374 if (entry->gpe_deleted || entry->gpe_internal) 1375 continue; 1376 if (entry->gpe_index == gpp->gpp_index) 1377 break; 1378 } 1379 if (entry == NULL) { 1380 gctl_error(req, "%d index '%d'", ENOENT, 1381 gpp->gpp_index); 1382 return (ENOENT); 1383 } 1384 } else 1385 entry = NULL; 1386 1387 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1388 if (error) { 1389 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1390 return (error); 1391 } 1392 1393 /* Provide feedback if so requested. */ 1394 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1395 sb = sbuf_new_auto(); 1396 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1397 (set) ? "" : "un"); 1398 if (entry) 1399 G_PART_FULLNAME(table, entry, sb, gp->name); 1400 else 1401 sbuf_cat(sb, gp->name); 1402 sbuf_cat(sb, "\n"); 1403 sbuf_finish(sb); 1404 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1405 sbuf_delete(sb); 1406 } 1407 return (0); 1408 } 1409 1410 static int 1411 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1412 { 1413 struct g_consumer *cp; 1414 struct g_provider *pp; 1415 struct g_geom *gp; 1416 struct g_part_entry *entry, *tmp; 1417 struct g_part_table *table; 1418 int error, reprobe; 1419 1420 gp = gpp->gpp_geom; 1421 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1422 g_topology_assert(); 1423 1424 table = gp->softc; 1425 if (!table->gpt_opened) { 1426 gctl_error(req, "%d", EPERM); 1427 return (EPERM); 1428 } 1429 1430 cp = LIST_FIRST(&gp->consumer); 1431 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1432 entry->gpe_modified = 0; 1433 if (entry->gpe_created) { 1434 pp = entry->gpe_pp; 1435 if (pp != NULL) { 1436 pp->private = NULL; 1437 entry->gpe_pp = NULL; 1438 g_wither_provider(pp, ENXIO); 1439 } 1440 entry->gpe_deleted = 1; 1441 } 1442 if (entry->gpe_deleted) { 1443 LIST_REMOVE(entry, gpe_entry); 1444 g_free(entry); 1445 } 1446 } 1447 1448 g_topology_unlock(); 1449 1450 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1451 table->gpt_created) ? 1 : 0; 1452 1453 if (reprobe) { 1454 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1455 if (entry->gpe_internal) 1456 continue; 1457 error = EBUSY; 1458 goto fail; 1459 } 1460 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1461 LIST_REMOVE(entry, gpe_entry); 1462 g_free(entry); 1463 } 1464 error = g_part_probe(gp, cp, table->gpt_depth); 1465 if (error) { 1466 g_topology_lock(); 1467 g_access(cp, -1, -1, -1); 1468 g_part_wither(gp, error); 1469 return (0); 1470 } 1471 table = gp->softc; 1472 1473 /* 1474 * Synthesize a disk geometry. Some partitioning schemes 1475 * depend on it and since some file systems need it even 1476 * when the partitition scheme doesn't, we do it here in 1477 * scheme-independent code. 1478 */ 1479 pp = cp->provider; 1480 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1481 } 1482 1483 error = G_PART_READ(table, cp); 1484 if (error) 1485 goto fail; 1486 error = g_part_check_integrity(table, cp); 1487 if (error) 1488 goto fail; 1489 1490 g_topology_lock(); 1491 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1492 if (!entry->gpe_internal) 1493 g_part_new_provider(gp, table, entry); 1494 } 1495 1496 table->gpt_opened = 0; 1497 g_access(cp, -1, -1, -1); 1498 return (0); 1499 1500 fail: 1501 g_topology_lock(); 1502 gctl_error(req, "%d", error); 1503 return (error); 1504 } 1505 1506 static void 1507 g_part_wither(struct g_geom *gp, int error) 1508 { 1509 struct g_part_entry *entry; 1510 struct g_part_table *table; 1511 1512 table = gp->softc; 1513 if (table != NULL) { 1514 G_PART_DESTROY(table, NULL); 1515 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1516 LIST_REMOVE(entry, gpe_entry); 1517 g_free(entry); 1518 } 1519 if (gp->softc != NULL) { 1520 kobj_delete((kobj_t)gp->softc, M_GEOM); 1521 gp->softc = NULL; 1522 } 1523 } 1524 g_wither_geom(gp, error); 1525 } 1526 1527 /* 1528 * Class methods. 1529 */ 1530 1531 static void 1532 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1533 { 1534 struct g_part_parms gpp; 1535 struct g_part_table *table; 1536 struct gctl_req_arg *ap; 1537 enum g_part_ctl ctlreq; 1538 unsigned int i, mparms, oparms, parm; 1539 int auto_commit, close_on_error; 1540 int error, modifies; 1541 1542 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1543 g_topology_assert(); 1544 1545 ctlreq = G_PART_CTL_NONE; 1546 modifies = 1; 1547 mparms = 0; 1548 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1549 switch (*verb) { 1550 case 'a': 1551 if (!strcmp(verb, "add")) { 1552 ctlreq = G_PART_CTL_ADD; 1553 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1554 G_PART_PARM_START | G_PART_PARM_TYPE; 1555 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1556 } 1557 break; 1558 case 'b': 1559 if (!strcmp(verb, "bootcode")) { 1560 ctlreq = G_PART_CTL_BOOTCODE; 1561 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1562 } 1563 break; 1564 case 'c': 1565 if (!strcmp(verb, "commit")) { 1566 ctlreq = G_PART_CTL_COMMIT; 1567 mparms |= G_PART_PARM_GEOM; 1568 modifies = 0; 1569 } else if (!strcmp(verb, "create")) { 1570 ctlreq = G_PART_CTL_CREATE; 1571 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1572 oparms |= G_PART_PARM_ENTRIES; 1573 } 1574 break; 1575 case 'd': 1576 if (!strcmp(verb, "delete")) { 1577 ctlreq = G_PART_CTL_DELETE; 1578 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1579 } else if (!strcmp(verb, "destroy")) { 1580 ctlreq = G_PART_CTL_DESTROY; 1581 mparms |= G_PART_PARM_GEOM; 1582 oparms |= G_PART_PARM_FORCE; 1583 } 1584 break; 1585 case 'm': 1586 if (!strcmp(verb, "modify")) { 1587 ctlreq = G_PART_CTL_MODIFY; 1588 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1589 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1590 } else if (!strcmp(verb, "move")) { 1591 ctlreq = G_PART_CTL_MOVE; 1592 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1593 } 1594 break; 1595 case 'r': 1596 if (!strcmp(verb, "recover")) { 1597 ctlreq = G_PART_CTL_RECOVER; 1598 mparms |= G_PART_PARM_GEOM; 1599 } else if (!strcmp(verb, "resize")) { 1600 ctlreq = G_PART_CTL_RESIZE; 1601 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1602 G_PART_PARM_SIZE; 1603 } 1604 break; 1605 case 's': 1606 if (!strcmp(verb, "set")) { 1607 ctlreq = G_PART_CTL_SET; 1608 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1609 oparms |= G_PART_PARM_INDEX; 1610 } 1611 break; 1612 case 'u': 1613 if (!strcmp(verb, "undo")) { 1614 ctlreq = G_PART_CTL_UNDO; 1615 mparms |= G_PART_PARM_GEOM; 1616 modifies = 0; 1617 } else if (!strcmp(verb, "unset")) { 1618 ctlreq = G_PART_CTL_UNSET; 1619 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1620 oparms |= G_PART_PARM_INDEX; 1621 } 1622 break; 1623 } 1624 if (ctlreq == G_PART_CTL_NONE) { 1625 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1626 return; 1627 } 1628 1629 bzero(&gpp, sizeof(gpp)); 1630 for (i = 0; i < req->narg; i++) { 1631 ap = &req->arg[i]; 1632 parm = 0; 1633 switch (ap->name[0]) { 1634 case 'a': 1635 if (!strcmp(ap->name, "arg0")) { 1636 parm = mparms & 1637 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1638 } 1639 if (!strcmp(ap->name, "attrib")) 1640 parm = G_PART_PARM_ATTRIB; 1641 break; 1642 case 'b': 1643 if (!strcmp(ap->name, "bootcode")) 1644 parm = G_PART_PARM_BOOTCODE; 1645 break; 1646 case 'c': 1647 if (!strcmp(ap->name, "class")) 1648 continue; 1649 break; 1650 case 'e': 1651 if (!strcmp(ap->name, "entries")) 1652 parm = G_PART_PARM_ENTRIES; 1653 break; 1654 case 'f': 1655 if (!strcmp(ap->name, "flags")) 1656 parm = G_PART_PARM_FLAGS; 1657 else if (!strcmp(ap->name, "force")) 1658 parm = G_PART_PARM_FORCE; 1659 break; 1660 case 'i': 1661 if (!strcmp(ap->name, "index")) 1662 parm = G_PART_PARM_INDEX; 1663 break; 1664 case 'l': 1665 if (!strcmp(ap->name, "label")) 1666 parm = G_PART_PARM_LABEL; 1667 break; 1668 case 'o': 1669 if (!strcmp(ap->name, "output")) 1670 parm = G_PART_PARM_OUTPUT; 1671 break; 1672 case 's': 1673 if (!strcmp(ap->name, "scheme")) 1674 parm = G_PART_PARM_SCHEME; 1675 else if (!strcmp(ap->name, "size")) 1676 parm = G_PART_PARM_SIZE; 1677 else if (!strcmp(ap->name, "start")) 1678 parm = G_PART_PARM_START; 1679 break; 1680 case 't': 1681 if (!strcmp(ap->name, "type")) 1682 parm = G_PART_PARM_TYPE; 1683 break; 1684 case 'v': 1685 if (!strcmp(ap->name, "verb")) 1686 continue; 1687 else if (!strcmp(ap->name, "version")) 1688 parm = G_PART_PARM_VERSION; 1689 break; 1690 } 1691 if ((parm & (mparms | oparms)) == 0) { 1692 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1693 return; 1694 } 1695 switch (parm) { 1696 case G_PART_PARM_ATTRIB: 1697 error = g_part_parm_str(req, ap->name, 1698 &gpp.gpp_attrib); 1699 break; 1700 case G_PART_PARM_BOOTCODE: 1701 error = g_part_parm_bootcode(req, ap->name, 1702 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1703 break; 1704 case G_PART_PARM_ENTRIES: 1705 error = g_part_parm_intmax(req, ap->name, 1706 &gpp.gpp_entries); 1707 break; 1708 case G_PART_PARM_FLAGS: 1709 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1710 break; 1711 case G_PART_PARM_FORCE: 1712 error = g_part_parm_uint32(req, ap->name, 1713 &gpp.gpp_force); 1714 break; 1715 case G_PART_PARM_GEOM: 1716 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1717 break; 1718 case G_PART_PARM_INDEX: 1719 error = g_part_parm_intmax(req, ap->name, 1720 &gpp.gpp_index); 1721 break; 1722 case G_PART_PARM_LABEL: 1723 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1724 break; 1725 case G_PART_PARM_OUTPUT: 1726 error = 0; /* Write-only parameter */ 1727 break; 1728 case G_PART_PARM_PROVIDER: 1729 error = g_part_parm_provider(req, ap->name, 1730 &gpp.gpp_provider); 1731 break; 1732 case G_PART_PARM_SCHEME: 1733 error = g_part_parm_scheme(req, ap->name, 1734 &gpp.gpp_scheme); 1735 break; 1736 case G_PART_PARM_SIZE: 1737 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1738 break; 1739 case G_PART_PARM_START: 1740 error = g_part_parm_quad(req, ap->name, 1741 &gpp.gpp_start); 1742 break; 1743 case G_PART_PARM_TYPE: 1744 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1745 break; 1746 case G_PART_PARM_VERSION: 1747 error = g_part_parm_uint32(req, ap->name, 1748 &gpp.gpp_version); 1749 break; 1750 default: 1751 error = EDOOFUS; 1752 gctl_error(req, "%d %s", error, ap->name); 1753 break; 1754 } 1755 if (error != 0) { 1756 if (error == ENOATTR) { 1757 gctl_error(req, "%d param '%s'", error, 1758 ap->name); 1759 } 1760 return; 1761 } 1762 gpp.gpp_parms |= parm; 1763 } 1764 if ((gpp.gpp_parms & mparms) != mparms) { 1765 parm = mparms - (gpp.gpp_parms & mparms); 1766 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1767 return; 1768 } 1769 1770 /* Obtain permissions if possible/necessary. */ 1771 close_on_error = 0; 1772 table = NULL; 1773 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1774 table = gpp.gpp_geom->softc; 1775 if (table != NULL && table->gpt_corrupt && 1776 ctlreq != G_PART_CTL_DESTROY && 1777 ctlreq != G_PART_CTL_RECOVER) { 1778 gctl_error(req, "%d table '%s' is corrupt", 1779 EPERM, gpp.gpp_geom->name); 1780 return; 1781 } 1782 if (table != NULL && !table->gpt_opened) { 1783 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1784 1, 1, 1); 1785 if (error) { 1786 gctl_error(req, "%d geom '%s'", error, 1787 gpp.gpp_geom->name); 1788 return; 1789 } 1790 table->gpt_opened = 1; 1791 close_on_error = 1; 1792 } 1793 } 1794 1795 /* Allow the scheme to check or modify the parameters. */ 1796 if (table != NULL) { 1797 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1798 if (error) { 1799 gctl_error(req, "%d pre-check failed", error); 1800 goto out; 1801 } 1802 } else 1803 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1804 1805 switch (ctlreq) { 1806 case G_PART_CTL_NONE: 1807 panic("%s", __func__); 1808 case G_PART_CTL_ADD: 1809 error = g_part_ctl_add(req, &gpp); 1810 break; 1811 case G_PART_CTL_BOOTCODE: 1812 error = g_part_ctl_bootcode(req, &gpp); 1813 break; 1814 case G_PART_CTL_COMMIT: 1815 error = g_part_ctl_commit(req, &gpp); 1816 break; 1817 case G_PART_CTL_CREATE: 1818 error = g_part_ctl_create(req, &gpp); 1819 break; 1820 case G_PART_CTL_DELETE: 1821 error = g_part_ctl_delete(req, &gpp); 1822 break; 1823 case G_PART_CTL_DESTROY: 1824 error = g_part_ctl_destroy(req, &gpp); 1825 break; 1826 case G_PART_CTL_MODIFY: 1827 error = g_part_ctl_modify(req, &gpp); 1828 break; 1829 case G_PART_CTL_MOVE: 1830 error = g_part_ctl_move(req, &gpp); 1831 break; 1832 case G_PART_CTL_RECOVER: 1833 error = g_part_ctl_recover(req, &gpp); 1834 break; 1835 case G_PART_CTL_RESIZE: 1836 error = g_part_ctl_resize(req, &gpp); 1837 break; 1838 case G_PART_CTL_SET: 1839 error = g_part_ctl_setunset(req, &gpp, 1); 1840 break; 1841 case G_PART_CTL_UNDO: 1842 error = g_part_ctl_undo(req, &gpp); 1843 break; 1844 case G_PART_CTL_UNSET: 1845 error = g_part_ctl_setunset(req, &gpp, 0); 1846 break; 1847 } 1848 1849 /* Implement automatic commit. */ 1850 if (!error) { 1851 auto_commit = (modifies && 1852 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1853 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1854 if (auto_commit) { 1855 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1856 __func__)); 1857 error = g_part_ctl_commit(req, &gpp); 1858 } 1859 } 1860 1861 out: 1862 if (error && close_on_error) { 1863 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1864 table->gpt_opened = 0; 1865 } 1866 } 1867 1868 static int 1869 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1870 struct g_geom *gp) 1871 { 1872 1873 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1874 g_topology_assert(); 1875 1876 g_part_wither(gp, EINVAL); 1877 return (0); 1878 } 1879 1880 static struct g_geom * 1881 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1882 { 1883 struct g_consumer *cp; 1884 struct g_geom *gp; 1885 struct g_part_entry *entry; 1886 struct g_part_table *table; 1887 struct root_hold_token *rht; 1888 int attr, depth; 1889 int error; 1890 1891 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1892 g_topology_assert(); 1893 1894 /* Skip providers that are already open for writing. */ 1895 if (pp->acw > 0) 1896 return (NULL); 1897 1898 /* 1899 * Create a GEOM with consumer and hook it up to the provider. 1900 * With that we become part of the topology. Optain read access 1901 * to the provider. 1902 */ 1903 gp = g_new_geomf(mp, "%s", pp->name); 1904 cp = g_new_consumer(gp); 1905 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1906 error = g_attach(cp, pp); 1907 if (error == 0) 1908 error = g_access(cp, 1, 0, 0); 1909 if (error != 0) { 1910 if (cp->provider) 1911 g_detach(cp); 1912 g_destroy_consumer(cp); 1913 g_destroy_geom(gp); 1914 return (NULL); 1915 } 1916 1917 rht = root_mount_hold(mp->name); 1918 g_topology_unlock(); 1919 1920 /* 1921 * Short-circuit the whole probing galore when there's no 1922 * media present. 1923 */ 1924 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1925 error = ENODEV; 1926 goto fail; 1927 } 1928 1929 /* Make sure we can nest and if so, determine our depth. */ 1930 error = g_getattr("PART::isleaf", cp, &attr); 1931 if (!error && attr) { 1932 error = ENODEV; 1933 goto fail; 1934 } 1935 error = g_getattr("PART::depth", cp, &attr); 1936 depth = (!error) ? attr + 1 : 0; 1937 1938 error = g_part_probe(gp, cp, depth); 1939 if (error) 1940 goto fail; 1941 1942 table = gp->softc; 1943 1944 /* 1945 * Synthesize a disk geometry. Some partitioning schemes 1946 * depend on it and since some file systems need it even 1947 * when the partitition scheme doesn't, we do it here in 1948 * scheme-independent code. 1949 */ 1950 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1951 1952 error = G_PART_READ(table, cp); 1953 if (error) 1954 goto fail; 1955 error = g_part_check_integrity(table, cp); 1956 if (error) 1957 goto fail; 1958 1959 g_topology_lock(); 1960 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1961 if (!entry->gpe_internal) 1962 g_part_new_provider(gp, table, entry); 1963 } 1964 1965 root_mount_rel(rht); 1966 g_access(cp, -1, 0, 0); 1967 return (gp); 1968 1969 fail: 1970 g_topology_lock(); 1971 root_mount_rel(rht); 1972 g_access(cp, -1, 0, 0); 1973 g_detach(cp); 1974 g_destroy_consumer(cp); 1975 g_destroy_geom(gp); 1976 return (NULL); 1977 } 1978 1979 /* 1980 * Geom methods. 1981 */ 1982 1983 static int 1984 g_part_access(struct g_provider *pp, int dr, int dw, int de) 1985 { 1986 struct g_consumer *cp; 1987 1988 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1989 dw, de)); 1990 1991 cp = LIST_FIRST(&pp->geom->consumer); 1992 1993 /* We always gain write-exclusive access. */ 1994 return (g_access(cp, dr, dw, dw + de)); 1995 } 1996 1997 static void 1998 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1999 struct g_consumer *cp, struct g_provider *pp) 2000 { 2001 char buf[64]; 2002 struct g_part_entry *entry; 2003 struct g_part_table *table; 2004 2005 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 2006 table = gp->softc; 2007 2008 if (indent == NULL) { 2009 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 2010 entry = pp->private; 2011 if (entry == NULL) 2012 return; 2013 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 2014 (uintmax_t)entry->gpe_offset, 2015 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2016 /* 2017 * libdisk compatibility quirk - the scheme dumps the 2018 * slicer name and partition type in a way that is 2019 * compatible with libdisk. When libdisk is not used 2020 * anymore, this should go away. 2021 */ 2022 G_PART_DUMPCONF(table, entry, sb, indent); 2023 } else if (cp != NULL) { /* Consumer configuration. */ 2024 KASSERT(pp == NULL, ("%s", __func__)); 2025 /* none */ 2026 } else if (pp != NULL) { /* Provider configuration. */ 2027 entry = pp->private; 2028 if (entry == NULL) 2029 return; 2030 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2031 (uintmax_t)entry->gpe_start); 2032 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2033 (uintmax_t)entry->gpe_end); 2034 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2035 entry->gpe_index); 2036 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2037 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2038 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2039 (uintmax_t)entry->gpe_offset); 2040 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2041 (uintmax_t)pp->mediasize); 2042 G_PART_DUMPCONF(table, entry, sb, indent); 2043 } else { /* Geom configuration. */ 2044 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2045 table->gpt_scheme->name); 2046 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2047 table->gpt_entries); 2048 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2049 (uintmax_t)table->gpt_first); 2050 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2051 (uintmax_t)table->gpt_last); 2052 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2053 table->gpt_sectors); 2054 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2055 table->gpt_heads); 2056 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2057 table->gpt_corrupt ? "CORRUPT": "OK"); 2058 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2059 table->gpt_opened ? "true": "false"); 2060 G_PART_DUMPCONF(table, NULL, sb, indent); 2061 } 2062 } 2063 2064 /*- 2065 * This start routine is only called for non-trivial requests, all the 2066 * trivial ones are handled autonomously by the slice code. 2067 * For requests we handle here, we must call the g_io_deliver() on the 2068 * bio, and return non-zero to indicate to the slice code that we did so. 2069 * This code executes in the "DOWN" I/O path, this means: 2070 * * No sleeping. 2071 * * Don't grab the topology lock. 2072 * * Don't call biowait, g_getattr(), g_setattr() or g_read_data() 2073 */ 2074 static int 2075 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td) 2076 { 2077 struct g_part_table *table; 2078 2079 table = pp->geom->softc; 2080 return G_PART_IOCTL(table, pp, cmd, data, fflag, td); 2081 } 2082 2083 static void 2084 g_part_resize(struct g_consumer *cp) 2085 { 2086 struct g_part_table *table; 2087 2088 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2089 g_topology_assert(); 2090 2091 table = cp->geom->softc; 2092 if (table->gpt_opened == 0) { 2093 if (g_access(cp, 1, 1, 1) != 0) 2094 return; 2095 table->gpt_opened = 1; 2096 } 2097 if (G_PART_RESIZE(table, NULL, NULL) == 0) 2098 printf("GEOM_PART: %s was automatically resized.\n" 2099 " Use `gpart commit %s` to save changes or " 2100 "`gpart undo %s` to revert them.\n", cp->geom->name, 2101 cp->geom->name, cp->geom->name); 2102 if (g_part_check_integrity(table, cp) != 0) { 2103 g_access(cp, -1, -1, -1); 2104 table->gpt_opened = 0; 2105 g_part_wither(table->gpt_gp, ENXIO); 2106 } 2107 } 2108 2109 static void 2110 g_part_orphan(struct g_consumer *cp) 2111 { 2112 struct g_provider *pp; 2113 struct g_part_table *table; 2114 2115 pp = cp->provider; 2116 KASSERT(pp != NULL, ("%s", __func__)); 2117 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2118 g_topology_assert(); 2119 2120 KASSERT(pp->error != 0, ("%s", __func__)); 2121 table = cp->geom->softc; 2122 if (table != NULL && table->gpt_opened) 2123 g_access(cp, -1, -1, -1); 2124 g_part_wither(cp->geom, pp->error); 2125 } 2126 2127 static void 2128 g_part_spoiled(struct g_consumer *cp) 2129 { 2130 2131 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2132 g_topology_assert(); 2133 2134 cp->flags |= G_CF_ORPHAN; 2135 g_part_wither(cp->geom, ENXIO); 2136 } 2137 2138 static void 2139 g_part_start(struct bio *bp) 2140 { 2141 struct bio *bp2; 2142 struct g_consumer *cp; 2143 struct g_geom *gp; 2144 struct g_part_entry *entry; 2145 struct g_part_table *table; 2146 struct g_kerneldump *gkd; 2147 struct g_provider *pp; 2148 char buf[64]; 2149 2150 pp = bp->bio_to; 2151 gp = pp->geom; 2152 table = gp->softc; 2153 cp = LIST_FIRST(&gp->consumer); 2154 2155 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2156 pp->name)); 2157 2158 entry = pp->private; 2159 if (entry == NULL) { 2160 g_io_deliver(bp, ENXIO); 2161 return; 2162 } 2163 2164 switch(bp->bio_cmd) { 2165 case BIO_DELETE: 2166 case BIO_READ: 2167 case BIO_WRITE: 2168 if (bp->bio_offset >= pp->mediasize) { 2169 g_io_deliver(bp, EIO); 2170 return; 2171 } 2172 bp2 = g_clone_bio(bp); 2173 if (bp2 == NULL) { 2174 g_io_deliver(bp, ENOMEM); 2175 return; 2176 } 2177 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2178 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2179 bp2->bio_done = g_std_done; 2180 bp2->bio_offset += entry->gpe_offset; 2181 g_io_request(bp2, cp); 2182 return; 2183 case BIO_FLUSH: 2184 break; 2185 case BIO_GETATTR: 2186 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2187 return; 2188 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2189 return; 2190 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2191 return; 2192 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2193 return; 2194 if (g_handleattr_str(bp, "PART::scheme", 2195 table->gpt_scheme->name)) 2196 return; 2197 if (g_handleattr_str(bp, "PART::type", 2198 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2199 return; 2200 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2201 /* 2202 * Check that the partition is suitable for kernel 2203 * dumps. Typically only swap partitions should be 2204 * used. If the request comes from the nested scheme 2205 * we allow dumping there as well. 2206 */ 2207 if ((bp->bio_from == NULL || 2208 bp->bio_from->geom->class != &g_part_class) && 2209 G_PART_DUMPTO(table, entry) == 0) { 2210 g_io_deliver(bp, ENODEV); 2211 printf("GEOM_PART: Partition '%s' not suitable" 2212 " for kernel dumps (wrong type?)\n", 2213 pp->name); 2214 return; 2215 } 2216 gkd = (struct g_kerneldump *)bp->bio_data; 2217 if (gkd->offset >= pp->mediasize) { 2218 g_io_deliver(bp, EIO); 2219 return; 2220 } 2221 if (gkd->offset + gkd->length > pp->mediasize) 2222 gkd->length = pp->mediasize - gkd->offset; 2223 gkd->offset += entry->gpe_offset; 2224 } 2225 break; 2226 default: 2227 g_io_deliver(bp, EOPNOTSUPP); 2228 return; 2229 } 2230 2231 bp2 = g_clone_bio(bp); 2232 if (bp2 == NULL) { 2233 g_io_deliver(bp, ENOMEM); 2234 return; 2235 } 2236 bp2->bio_done = g_std_done; 2237 g_io_request(bp2, cp); 2238 } 2239 2240 static void 2241 g_part_init(struct g_class *mp) 2242 { 2243 2244 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2245 } 2246 2247 static void 2248 g_part_fini(struct g_class *mp) 2249 { 2250 2251 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2252 } 2253 2254 static void 2255 g_part_unload_event(void *arg, int flag) 2256 { 2257 struct g_consumer *cp; 2258 struct g_geom *gp; 2259 struct g_provider *pp; 2260 struct g_part_scheme *scheme; 2261 struct g_part_table *table; 2262 uintptr_t *xchg; 2263 int acc, error; 2264 2265 if (flag == EV_CANCEL) 2266 return; 2267 2268 xchg = arg; 2269 error = 0; 2270 scheme = (void *)(*xchg); 2271 2272 g_topology_assert(); 2273 2274 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2275 table = gp->softc; 2276 if (table->gpt_scheme != scheme) 2277 continue; 2278 2279 acc = 0; 2280 LIST_FOREACH(pp, &gp->provider, provider) 2281 acc += pp->acr + pp->acw + pp->ace; 2282 LIST_FOREACH(cp, &gp->consumer, consumer) 2283 acc += cp->acr + cp->acw + cp->ace; 2284 2285 if (!acc) 2286 g_part_wither(gp, ENOSYS); 2287 else 2288 error = EBUSY; 2289 } 2290 2291 if (!error) 2292 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2293 2294 *xchg = error; 2295 } 2296 2297 int 2298 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2299 { 2300 struct g_part_scheme *iter; 2301 uintptr_t arg; 2302 int error; 2303 2304 error = 0; 2305 switch (type) { 2306 case MOD_LOAD: 2307 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2308 if (scheme == iter) { 2309 printf("GEOM_PART: scheme %s is already " 2310 "registered!\n", scheme->name); 2311 break; 2312 } 2313 } 2314 if (iter == NULL) { 2315 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2316 scheme_list); 2317 g_retaste(&g_part_class); 2318 } 2319 break; 2320 case MOD_UNLOAD: 2321 arg = (uintptr_t)scheme; 2322 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2323 NULL); 2324 if (error == 0) 2325 error = arg; 2326 break; 2327 default: 2328 error = EOPNOTSUPP; 2329 break; 2330 } 2331 2332 return (error); 2333 } 2334