1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/param.h>
30 #include <sys/bio.h>
31 #include <sys/endian.h>
32 #include <sys/kernel.h>
33 #include <sys/kobj.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/queue.h>
39 #include <sys/sbuf.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42 #include <sys/uuid.h>
43 #include <geom/geom.h>
44 #include <geom/geom_ctl.h>
45 #include <geom/geom_int.h>
46 #include <geom/part/g_part.h>
47
48 #include "g_part_if.h"
49
50 static kobj_method_t g_part_null_methods[] = {
51 { 0, 0 }
52 };
53
54 static struct g_part_scheme g_part_null_scheme = {
55 "(none)",
56 g_part_null_methods,
57 sizeof(struct g_part_table),
58 };
59
60 TAILQ_HEAD(, g_part_scheme) g_part_schemes =
61 TAILQ_HEAD_INITIALIZER(g_part_schemes);
62
63 struct g_part_alias_list {
64 const char *lexeme;
65 enum g_part_alias alias;
66 } g_part_alias_list[G_PART_ALIAS_COUNT] = {
67 { "apple-apfs", G_PART_ALIAS_APPLE_APFS },
68 { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
69 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
70 { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
71 { "apple-label", G_PART_ALIAS_APPLE_LABEL },
72 { "apple-raid", G_PART_ALIAS_APPLE_RAID },
73 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
74 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
75 { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
76 { "apple-zfs", G_PART_ALIAS_APPLE_ZFS },
77 { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
78 { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
79 { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
80 { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
81 { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
82 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
83 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
84 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
85 { "dragonfly-label32", G_PART_ALIAS_DFBSD },
86 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
87 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
88 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
89 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
90 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
91 { "ebr", G_PART_ALIAS_EBR },
92 { "efi", G_PART_ALIAS_EFI },
93 { "fat16", G_PART_ALIAS_MS_FAT16 },
94 { "fat32", G_PART_ALIAS_MS_FAT32 },
95 { "fat32lba", G_PART_ALIAS_MS_FAT32LBA },
96 { "freebsd", G_PART_ALIAS_FREEBSD },
97 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
98 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
99 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
100 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
101 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
102 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
103 { "hifive-fsbl", G_PART_ALIAS_HIFIVE_FSBL },
104 { "hifive-bbl", G_PART_ALIAS_HIFIVE_BBL },
105 { "linux-data", G_PART_ALIAS_LINUX_DATA },
106 { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
107 { "linux-raid", G_PART_ALIAS_LINUX_RAID },
108 { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
109 { "mbr", G_PART_ALIAS_MBR },
110 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
111 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
112 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
113 { "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
114 { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
115 { "ms-spaces", G_PART_ALIAS_MS_SPACES },
116 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
117 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
118 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
119 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
120 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
121 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
122 { "ntfs", G_PART_ALIAS_MS_NTFS },
123 { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
124 { "prep-boot", G_PART_ALIAS_PREP_BOOT },
125 { "solaris-boot", G_PART_ALIAS_SOLARIS_BOOT },
126 { "solaris-root", G_PART_ALIAS_SOLARIS_ROOT },
127 { "solaris-swap", G_PART_ALIAS_SOLARIS_SWAP },
128 { "solaris-backup", G_PART_ALIAS_SOLARIS_BACKUP },
129 { "solaris-var", G_PART_ALIAS_SOLARIS_VAR },
130 { "solaris-home", G_PART_ALIAS_SOLARIS_HOME },
131 { "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC },
132 { "solaris-reserved", G_PART_ALIAS_SOLARIS_RESERVED },
133 { "u-boot-env", G_PART_ALIAS_U_BOOT_ENV },
134 { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
135 { "vmware-vmfs", G_PART_ALIAS_VMFS },
136 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
137 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
138 };
139
140 SYSCTL_DECL(_kern_geom);
141 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
142 "GEOM_PART stuff");
143 u_int geom_part_check_integrity = 1;
144 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
145 CTLFLAG_RWTUN, &geom_part_check_integrity, 1,
146 "Enable integrity checking");
147 static u_int auto_resize = 1;
148 SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
149 CTLFLAG_RWTUN, &auto_resize, 1,
150 "Enable auto resize");
151 static u_int allow_nesting = 0;
152 SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting,
153 CTLFLAG_RWTUN, &allow_nesting, 0,
154 "Allow additional levels of nesting");
155 char g_part_separator[MAXPATHLEN] = "";
156 SYSCTL_STRING(_kern_geom_part, OID_AUTO, separator,
157 CTLFLAG_RDTUN, &g_part_separator, sizeof(g_part_separator),
158 "Partition name separator");
159
160 /*
161 * The GEOM partitioning class.
162 */
163 static g_ctl_req_t g_part_ctlreq;
164 static g_ctl_destroy_geom_t g_part_destroy_geom;
165 static g_fini_t g_part_fini;
166 static g_init_t g_part_init;
167 static g_taste_t g_part_taste;
168
169 static g_access_t g_part_access;
170 static g_dumpconf_t g_part_dumpconf;
171 static g_orphan_t g_part_orphan;
172 static g_spoiled_t g_part_spoiled;
173 static g_start_t g_part_start;
174 static g_resize_t g_part_resize;
175 static g_ioctl_t g_part_ioctl;
176
177 static struct g_class g_part_class = {
178 .name = "PART",
179 .version = G_VERSION,
180 /* Class methods. */
181 .ctlreq = g_part_ctlreq,
182 .destroy_geom = g_part_destroy_geom,
183 .fini = g_part_fini,
184 .init = g_part_init,
185 .taste = g_part_taste,
186 /* Geom methods. */
187 .access = g_part_access,
188 .dumpconf = g_part_dumpconf,
189 .orphan = g_part_orphan,
190 .spoiled = g_part_spoiled,
191 .start = g_part_start,
192 .resize = g_part_resize,
193 .ioctl = g_part_ioctl,
194 };
195
196 DECLARE_GEOM_CLASS(g_part_class, g_part);
197 MODULE_VERSION(g_part, 0);
198
199 /*
200 * Support functions.
201 */
202
203 static void g_part_wither(struct g_geom *, int);
204
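/*
 * Map a partition type alias to its canonical lexeme, or return NULL
 * when the alias is not present in g_part_alias_list.
 */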
205 const char *
206 g_part_alias_name(enum g_part_alias alias)
207 {
208 int i;
209
210 for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
211 if (g_part_alias_list[i].alias != alias)
212 continue;
213 return (g_part_alias_list[i].lexeme);
214 }
215
216 return (NULL);
217 }
218
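/*
 * For a given sectors-per-track value, pick the head count from the
 * candidate list that yields the largest CHS capacity while keeping
 * the cylinder count at or below 1023.
 */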
219 void
220 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
221 u_int *bestheads)
222 {
223 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
224 off_t chs, cylinders;
225 u_int heads;
226 int idx;
227
228 *bestchs = 0;
229 *bestheads = 0;
230 for (idx = 0; candidate_heads[idx] != 0; idx++) {
231 heads = candidate_heads[idx];
232 cylinders = blocks / heads / sectors;
233 if (cylinders < heads || cylinders < sectors)
234 break;
235 if (cylinders > 1023)
236 continue;
237 chs = cylinders * heads * sectors;
238 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
239 *bestchs = chs;
240 *bestheads = heads;
241 }
242 }
243 }
244
245 static void
246 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
247 off_t blocks)
248 {
249 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
250 off_t chs, bestchs;
251 u_int heads, sectors;
252 int idx;
253
254 if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
255 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
256 table->gpt_fixgeom = 0;
257 table->gpt_heads = 0;
258 table->gpt_sectors = 0;
259 bestchs = 0;
260 for (idx = 0; candidate_sectors[idx] != 0; idx++) {
261 sectors = candidate_sectors[idx];
262 g_part_geometry_heads(blocks, sectors, &chs, &heads);
263 if (chs == 0)
264 continue;
265 /*
266 * Prefer a geometry with sectors > 1, but only if
267 * it doesn't bump down the number of heads to 1.
268 */
269 if (chs > bestchs || (chs == bestchs && heads > 1 &&
270 table->gpt_sectors == 1)) {
271 bestchs = chs;
272 table->gpt_heads = heads;
273 table->gpt_sectors = sectors;
274 }
275 }
276 /*
277 * If we didn't find a geometry at all, then the disk is
278 * too big. This means we can use the maximum number of
279 * heads and sectors.
280 */
281 if (bestchs == 0) {
282 table->gpt_heads = 255;
283 table->gpt_sectors = 63;
284 }
285 } else {
286 table->gpt_fixgeom = 1;
287 table->gpt_heads = heads;
288 table->gpt_sectors = sectors;
289 }
290 }
291
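/*
 * Completion handler for the physical-path attribute request forwarded
 * to the parent provider: append '/' and the partition entry's name to
 * the returned path before completing the original bio.
 */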
292 static void
293 g_part_get_physpath_done(struct bio *bp)
294 {
295 struct g_geom *gp;
296 struct g_part_entry *entry;
297 struct g_part_table *table;
298 struct g_provider *pp;
299 struct bio *pbp;
300
301 pbp = bp->bio_parent;
302 pp = pbp->bio_to;
303 gp = pp->geom;
304 table = gp->softc;
305 entry = pp->private;
306
307 if (bp->bio_error == 0) {
308 char *end;
309 size_t len, remainder;
310 len = strlcat(bp->bio_data, "/", bp->bio_length);
311 if (len < bp->bio_length) {
312 end = bp->bio_data + len;
313 remainder = bp->bio_length - len;
314 G_PART_NAME(table, entry, end, remainder);
315 }
316 }
317 g_std_done(bp);
318 }
319
320 #define DPRINTF(...) if (bootverbose) { \
321 printf("GEOM_PART: " __VA_ARGS__); \
322 }
323
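/*
 * Sanity-check a freshly read or recovered table: the usable LBA range
 * must fit the media, and entries must lie within it and not overlap.
 * With kern.geom.part.check_integrity enabled a failure is fatal
 * (EINVAL); otherwise the table is only marked corrupt.
 */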
324 static int
325 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
326 {
327 struct g_part_entry *e1, *e2;
328 struct g_provider *pp;
329 off_t offset;
330 int failed;
331
332 failed = 0;
333 pp = cp->provider;
334 if (table->gpt_last < table->gpt_first) {
335 DPRINTF("last LBA is below first LBA: %jd < %jd\n",
336 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
337 failed++;
338 }
339 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
340 DPRINTF("last LBA extends beyond mediasize: "
341 "%jd > %jd\n", (intmax_t)table->gpt_last,
342 (intmax_t)pp->mediasize / pp->sectorsize - 1);
343 failed++;
344 }
345 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
346 if (e1->gpe_deleted || e1->gpe_internal)
347 continue;
348 if (e1->gpe_start < table->gpt_first) {
349 DPRINTF("partition %d has start offset below first "
350 "LBA: %jd < %jd\n", e1->gpe_index,
351 (intmax_t)e1->gpe_start,
352 (intmax_t)table->gpt_first);
353 failed++;
354 }
355 if (e1->gpe_start > table->gpt_last) {
356 DPRINTF("partition %d has start offset beyond last "
357 "LBA: %jd > %jd\n", e1->gpe_index,
358 (intmax_t)e1->gpe_start,
359 (intmax_t)table->gpt_last);
360 failed++;
361 }
362 if (e1->gpe_end < e1->gpe_start) {
363 DPRINTF("partition %d has end offset below start "
364 "offset: %jd < %jd\n", e1->gpe_index,
365 (intmax_t)e1->gpe_end,
366 (intmax_t)e1->gpe_start);
367 failed++;
368 }
369 if (e1->gpe_end > table->gpt_last) {
370 DPRINTF("partition %d has end offset beyond last "
371 "LBA: %jd > %jd\n", e1->gpe_index,
372 (intmax_t)e1->gpe_end,
373 (intmax_t)table->gpt_last);
374 failed++;
375 }
376 if (pp->stripesize > 0) {
377 offset = e1->gpe_start * pp->sectorsize;
378 if (e1->gpe_offset > offset)
379 offset = e1->gpe_offset;
380 if ((offset + pp->stripeoffset) % pp->stripesize) {
381 DPRINTF("partition %d on (%s, %s) is not "
382 "aligned on %ju bytes\n", e1->gpe_index,
383 pp->name, table->gpt_scheme->name,
384 (uintmax_t)pp->stripesize);
385 /* Don't treat this as a critical failure */
386 }
387 }
388 e2 = e1;
389 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
390 if (e2->gpe_deleted || e2->gpe_internal)
391 continue;
392 if (e1->gpe_start >= e2->gpe_start &&
393 e1->gpe_start <= e2->gpe_end) {
394 DPRINTF("partition %d has start offset inside "
395 "partition %d: start[%d] %jd >= start[%d] "
396 "%jd <= end[%d] %jd\n",
397 e1->gpe_index, e2->gpe_index,
398 e2->gpe_index, (intmax_t)e2->gpe_start,
399 e1->gpe_index, (intmax_t)e1->gpe_start,
400 e2->gpe_index, (intmax_t)e2->gpe_end);
401 failed++;
402 }
403 if (e1->gpe_end >= e2->gpe_start &&
404 e1->gpe_end <= e2->gpe_end) {
405 DPRINTF("partition %d has end offset inside "
406 "partition %d: start[%d] %jd >= end[%d] "
407 "%jd <= end[%d] %jd\n",
408 e1->gpe_index, e2->gpe_index,
409 e2->gpe_index, (intmax_t)e2->gpe_start,
410 e1->gpe_index, (intmax_t)e1->gpe_end,
411 e2->gpe_index, (intmax_t)e2->gpe_end);
412 failed++;
413 }
414 if (e1->gpe_start < e2->gpe_start &&
415 e1->gpe_end > e2->gpe_end) {
416 DPRINTF("partition %d contains partition %d: "
417 "start[%d] %jd > start[%d] %jd, end[%d] "
418 "%jd < end[%d] %jd\n",
419 e1->gpe_index, e2->gpe_index,
420 e1->gpe_index, (intmax_t)e1->gpe_start,
421 e2->gpe_index, (intmax_t)e2->gpe_start,
422 e2->gpe_index, (intmax_t)e2->gpe_end,
423 e1->gpe_index, (intmax_t)e1->gpe_end);
424 failed++;
425 }
426 }
427 }
428 if (failed != 0) {
429 printf("GEOM_PART: integrity check failed (%s, %s)\n",
430 pp->name, table->gpt_scheme->name);
431 if (geom_part_check_integrity != 0)
432 return (EINVAL);
433 table->gpt_corrupt = 1;
434 }
435 return (0);
436 }
437 #undef DPRINTF
438
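/*
 * Return the entry with the given index, allocating and linking a new
 * one if it does not exist yet (the list is kept sorted by index), and
 * set its start and end LBAs.
 */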
439 struct g_part_entry *
440 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
441 quad_t end)
442 {
443 struct g_part_entry *entry, *last;
444
445 last = NULL;
446 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
447 if (entry->gpe_index == index)
448 break;
449 if (entry->gpe_index > index) {
450 entry = NULL;
451 break;
452 }
453 last = entry;
454 }
455 if (entry == NULL) {
456 entry = g_malloc(table->gpt_scheme->gps_entrysz,
457 M_WAITOK | M_ZERO);
458 entry->gpe_index = index;
459 if (last == NULL)
460 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
461 else
462 LIST_INSERT_AFTER(last, entry, gpe_entry);
463 } else
464 entry->gpe_offset = 0;
465 entry->gpe_start = start;
466 entry->gpe_end = end;
467 return (entry);
468 }
469
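/*
 * Create the GEOM provider for a table entry, or refresh an existing
 * one: propagate the parent's aliases, sector size and stripe
 * parameters, and size the provider from the entry's LBA range.
 */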
470 static void
471 g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
472 struct g_part_entry *entry)
473 {
474 struct g_consumer *cp;
475 struct g_provider *pp;
476 struct g_geom_alias *gap;
477 off_t offset;
478
479 cp = LIST_FIRST(&gp->consumer);
480 pp = cp->provider;
481
482 offset = entry->gpe_start * pp->sectorsize;
483 if (entry->gpe_offset < offset)
484 entry->gpe_offset = offset;
485
486 if (entry->gpe_pp == NULL) {
487 entry->gpe_pp = G_PART_NEW_PROVIDER(table, gp, entry, gp->name);
488 /*
489 * If our parent provider had any aliases, then copy them to our
490 * provider so when geom DEV tastes things later, they will be
491 * there for it to create the aliases with those names used in
492 * place of the geom's name we use to create the provider. The
493 * kobj interface that generates names makes this awkward.
494 */
495 LIST_FOREACH(gap, &pp->aliases, ga_next)
496 G_PART_ADD_ALIAS(table, entry->gpe_pp, entry, gap->ga_alias);
497 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
498 entry->gpe_pp->private = entry; /* Close the circle. */
499 }
500 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */
501 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
502 pp->sectorsize;
503 entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
504 entry->gpe_pp->sectorsize = pp->sectorsize;
505 entry->gpe_pp->stripesize = pp->stripesize;
506 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
507 if (pp->stripesize > 0)
508 entry->gpe_pp->stripeoffset %= pp->stripesize;
509 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
510 g_error_provider(entry->gpe_pp, 0);
511 }
512
513 static struct g_geom*
514 g_part_find_geom(const char *name)
515 {
516 struct g_geom *gp;
517 LIST_FOREACH(gp, &g_part_class.geom, geom) {
518 if ((gp->flags & G_GEOM_WITHER) == 0 &&
519 strcmp(name, gp->name) == 0)
520 break;
521 }
522 return (gp);
523 }
524
525 static int
526 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
527 {
528 struct g_geom *gp;
529 const char *gname;
530
531 gname = gctl_get_asciiparam(req, name);
532 if (gname == NULL)
533 return (ENOATTR);
534 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
535 gname += sizeof(_PATH_DEV) - 1;
536 gp = g_part_find_geom(gname);
537 if (gp == NULL) {
538 gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
539 return (EINVAL);
540 }
541 *v = gp;
542 return (0);
543 }
544
545 static int
546 g_part_parm_provider(struct gctl_req *req, const char *name,
547 struct g_provider **v)
548 {
549 struct g_provider *pp;
550 const char *pname;
551
552 pname = gctl_get_asciiparam(req, name);
553 if (pname == NULL)
554 return (ENOATTR);
555 pp = g_provider_by_name(pname);
556 if (pp == NULL) {
557 gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
558 return (EINVAL);
559 }
560 *v = pp;
561 return (0);
562 }
563
564 static int
565 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
566 {
567 const char *p;
568 char *x;
569 quad_t q;
570
571 p = gctl_get_asciiparam(req, name);
572 if (p == NULL)
573 return (ENOATTR);
574 q = strtoq(p, &x, 0);
575 if (*x != '\0' || q < 0) {
576 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
577 return (EINVAL);
578 }
579 *v = q;
580 return (0);
581 }
582
583 static int
584 g_part_parm_scheme(struct gctl_req *req, const char *name,
585 struct g_part_scheme **v)
586 {
587 struct g_part_scheme *s;
588 const char *p;
589
590 p = gctl_get_asciiparam(req, name);
591 if (p == NULL)
592 return (ENOATTR);
593 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
594 if (s == &g_part_null_scheme)
595 continue;
596 if (!strcasecmp(s->name, p))
597 break;
598 }
599 if (s == NULL) {
600 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
601 return (EINVAL);
602 }
603 *v = s;
604 return (0);
605 }
606
607 static int
608 g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
609 {
610 const char *p;
611
612 p = gctl_get_asciiparam(req, name);
613 if (p == NULL)
614 return (ENOATTR);
615 /* An empty label is always valid. */
616 if (strcmp(name, "label") != 0 && p[0] == '\0') {
617 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
618 return (EINVAL);
619 }
620 *v = p;
621 return (0);
622 }
623
624 static int
625 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
626 {
627 const intmax_t *p;
628 int size;
629
630 p = gctl_get_param(req, name, &size);
631 if (p == NULL)
632 return (ENOATTR);
633 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
634 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
635 return (EINVAL);
636 }
637 *v = (u_int)*p;
638 return (0);
639 }
640
641 static int
642 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
643 {
644 const uint32_t *p;
645 int size;
646
647 p = gctl_get_param(req, name, &size);
648 if (p == NULL)
649 return (ENOATTR);
650 if (size != sizeof(*p) || *p > INT_MAX) {
651 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
652 return (EINVAL);
653 }
654 *v = (u_int)*p;
655 return (0);
656 }
657
658 static int
659 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
660 unsigned int *s)
661 {
662 const void *p;
663 int size;
664
665 p = gctl_get_param(req, name, &size);
666 if (p == NULL)
667 return (ENOATTR);
668 *v = p;
669 *s = size;
670 return (0);
671 }
672
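/*
 * Ask every registered scheme to probe the consumer and keep the best
 * match. A probe result of 0 is a definitive match, negative values
 * are weighted preferences (closer to 0 wins) and positive values
 * indicate an error. Returns ENXIO when no scheme recognizes the media.
 */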
673 static int
674 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
675 {
676 struct g_part_scheme *iter, *scheme;
677 struct g_part_table *table;
678 int pri, probe;
679
680 table = gp->softc;
681 scheme = (table != NULL) ? table->gpt_scheme : NULL;
682 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
683 if (pri == 0)
684 goto done;
685 if (pri > 0) { /* error */
686 scheme = NULL;
687 pri = INT_MIN;
688 }
689
690 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
691 if (iter == &g_part_null_scheme)
692 continue;
693 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
694 M_WAITOK);
695 table->gpt_gp = gp;
696 table->gpt_scheme = iter;
697 table->gpt_depth = depth;
698 probe = G_PART_PROBE(table, cp);
699 if (probe <= 0 && probe > pri) {
700 pri = probe;
701 scheme = iter;
702 if (gp->softc != NULL)
703 kobj_delete((kobj_t)gp->softc, M_GEOM);
704 gp->softc = table;
705 if (pri == 0)
706 goto done;
707 } else
708 kobj_delete((kobj_t)table, M_GEOM);
709 }
710
711 done:
712 return ((scheme == NULL) ? ENXIO : 0);
713 }
714
715 /*
716 * Control request functions.
717 */
718
719 static int
720 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
721 {
722 struct g_geom *gp;
723 struct g_provider *pp;
724 struct g_part_entry *delent, *last, *entry;
725 struct g_part_table *table;
726 struct sbuf *sb;
727 quad_t end;
728 unsigned int index;
729 int error;
730
731 gp = gpp->gpp_geom;
732 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
733 g_topology_assert();
734
735 pp = LIST_FIRST(&gp->consumer)->provider;
736 table = gp->softc;
737 end = gpp->gpp_start + gpp->gpp_size - 1;
738
739 if (gpp->gpp_start < table->gpt_first ||
740 gpp->gpp_start > table->gpt_last) {
741 gctl_error(req, "%d start '%jd'", EINVAL,
742 (intmax_t)gpp->gpp_start);
743 return (EINVAL);
744 }
745 if (end < gpp->gpp_start || end > table->gpt_last) {
746 gctl_error(req, "%d size '%jd'", EINVAL,
747 (intmax_t)gpp->gpp_size);
748 return (EINVAL);
749 }
750 if (gpp->gpp_index > table->gpt_entries) {
751 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
752 return (EINVAL);
753 }
754
755 delent = last = NULL;
756 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
757 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
758 if (entry->gpe_deleted) {
759 if (entry->gpe_index == index)
760 delent = entry;
761 continue;
762 }
763 if (entry->gpe_index == index)
764 index = entry->gpe_index + 1;
765 if (entry->gpe_index < index)
766 last = entry;
767 if (entry->gpe_internal)
768 continue;
769 if (gpp->gpp_start >= entry->gpe_start &&
770 gpp->gpp_start <= entry->gpe_end) {
771 gctl_error(req, "%d start '%jd'", ENOSPC,
772 (intmax_t)gpp->gpp_start);
773 return (ENOSPC);
774 }
775 if (end >= entry->gpe_start && end <= entry->gpe_end) {
776 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
777 return (ENOSPC);
778 }
779 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
780 gctl_error(req, "%d size '%jd'", ENOSPC,
781 (intmax_t)gpp->gpp_size);
782 return (ENOSPC);
783 }
784 }
785 if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
786 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
787 return (EEXIST);
788 }
789 if (index > table->gpt_entries) {
790 gctl_error(req, "%d index '%d'", ENOSPC, index);
791 return (ENOSPC);
792 }
793
794 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
795 M_WAITOK | M_ZERO) : delent;
796 entry->gpe_index = index;
797 entry->gpe_start = gpp->gpp_start;
798 entry->gpe_end = end;
799 error = G_PART_ADD(table, entry, gpp);
800 if (error) {
801 gctl_error(req, "%d", error);
802 if (delent == NULL)
803 g_free(entry);
804 return (error);
805 }
806 if (delent == NULL) {
807 if (last == NULL)
808 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
809 else
810 LIST_INSERT_AFTER(last, entry, gpe_entry);
811 entry->gpe_created = 1;
812 } else {
813 entry->gpe_deleted = 0;
814 entry->gpe_modified = 1;
815 }
816 g_part_new_provider(gp, table, entry);
817
818 /* Provide feedback if so requested. */
819 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
820 sb = sbuf_new_auto();
821 G_PART_FULLNAME(table, entry, sb, gp->name);
822 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
823 sbuf_printf(sb, " added, but partition is not "
824 "aligned on %ju bytes\n", (uintmax_t)pp->stripesize);
825 else
826 sbuf_cat(sb, " added\n");
827 sbuf_finish(sb);
828 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
829 sbuf_delete(sb);
830 }
831 return (0);
832 }
833
834 static int
835 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
836 {
837 struct g_geom *gp;
838 struct g_part_table *table;
839 struct sbuf *sb;
840 int error, sz;
841
842 gp = gpp->gpp_geom;
843 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
844 g_topology_assert();
845
846 table = gp->softc;
847 sz = table->gpt_scheme->gps_bootcodesz;
848 if (sz == 0) {
849 error = ENODEV;
850 goto fail;
851 }
852 if (gpp->gpp_codesize > sz) {
853 error = EFBIG;
854 goto fail;
855 }
856
857 error = G_PART_BOOTCODE(table, gpp);
858 if (error)
859 goto fail;
860
861 /* Provide feedback if so requested. */
862 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
863 sb = sbuf_new_auto();
864 sbuf_printf(sb, "bootcode written to %s\n", gp->name);
865 sbuf_finish(sb);
866 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
867 sbuf_delete(sb);
868 }
869 return (0);
870
871 fail:
872 gctl_error(req, "%d", error);
873 return (error);
874 }
875
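/*
 * Write pending changes to disk: scrub the head/tail sectors recorded
 * in the gpt_smhead/gpt_smtail bitmaps, let the scheme write its
 * metadata and free the entries that were marked deleted.
 */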
876 static int
877 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
878 {
879 struct g_consumer *cp;
880 struct g_geom *gp;
881 struct g_provider *pp;
882 struct g_part_entry *entry, *tmp;
883 struct g_part_table *table;
884 char *buf;
885 int error, i;
886
887 gp = gpp->gpp_geom;
888 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
889 g_topology_assert();
890
891 table = gp->softc;
892 if (!table->gpt_opened) {
893 gctl_error(req, "%d", EPERM);
894 return (EPERM);
895 }
896
897 g_topology_unlock();
898
899 cp = LIST_FIRST(&gp->consumer);
900 if ((table->gpt_smhead | table->gpt_smtail) != 0) {
901 pp = cp->provider;
902 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
903 while (table->gpt_smhead != 0) {
904 i = ffs(table->gpt_smhead) - 1;
905 error = g_write_data(cp, i * pp->sectorsize, buf,
906 pp->sectorsize);
907 if (error) {
908 g_free(buf);
909 goto fail;
910 }
911 table->gpt_smhead &= ~(1 << i);
912 }
913 while (table->gpt_smtail != 0) {
914 i = ffs(table->gpt_smtail) - 1;
915 error = g_write_data(cp, pp->mediasize - (i + 1) *
916 pp->sectorsize, buf, pp->sectorsize);
917 if (error) {
918 g_free(buf);
919 goto fail;
920 }
921 table->gpt_smtail &= ~(1 << i);
922 }
923 g_free(buf);
924 }
925
926 if (table->gpt_scheme == &g_part_null_scheme) {
927 g_topology_lock();
928 g_access(cp, -1, -1, -1);
929 g_part_wither(gp, ENXIO);
930 return (0);
931 }
932
933 error = G_PART_WRITE(table, cp);
934 if (error)
935 goto fail;
936
937 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
938 if (!entry->gpe_deleted) {
939 /* Notify consumers that the provider may have changed. */
940 if (entry->gpe_modified && (
941 entry->gpe_pp->acw + entry->gpe_pp->ace +
942 entry->gpe_pp->acr) == 0)
943 g_media_changed(entry->gpe_pp, M_NOWAIT);
944 entry->gpe_created = 0;
945 entry->gpe_modified = 0;
946 continue;
947 }
948 LIST_REMOVE(entry, gpe_entry);
949 g_free(entry);
950 }
951 table->gpt_created = 0;
952 table->gpt_opened = 0;
953
954 g_topology_lock();
955 g_access(cp, -1, -1, -1);
956 return (0);
957
958 fail:
959 g_topology_lock();
960 gctl_error(req, "%d", error);
961 return (error);
962 }
963
964 static int
965 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
966 {
967 struct g_consumer *cp;
968 struct g_geom *gp;
969 struct g_provider *pp;
970 struct g_part_scheme *scheme;
971 struct g_part_table *null, *table;
972 struct sbuf *sb;
973 int attr, error;
974
975 pp = gpp->gpp_provider;
976 scheme = gpp->gpp_scheme;
977 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
978 g_topology_assert();
979
980 /* Check that there isn't already a g_part geom on the provider. */
981 gp = g_part_find_geom(pp->name);
982 if (gp != NULL) {
983 null = gp->softc;
984 if (null->gpt_scheme != &g_part_null_scheme) {
985 gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
986 return (EEXIST);
987 }
988 } else
989 null = NULL;
990
991 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
992 (gpp->gpp_entries < scheme->gps_minent ||
993 gpp->gpp_entries > scheme->gps_maxent)) {
994 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
995 return (EINVAL);
996 }
997
998 if (null == NULL)
999 gp = g_new_geom(&g_part_class, pp->name);
1000 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
1001 M_WAITOK);
1002 table = gp->softc;
1003 table->gpt_gp = gp;
1004 table->gpt_scheme = gpp->gpp_scheme;
1005 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
1006 gpp->gpp_entries : scheme->gps_defent;
1007 LIST_INIT(&table->gpt_entry);
1008 if (null == NULL) {
1009 cp = g_new_consumer(gp);
1010 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
1011 error = g_attach(cp, pp);
1012 if (error == 0)
1013 error = g_access(cp, 1, 1, 1);
1014 if (error != 0) {
1015 g_part_wither(gp, error);
1016 gctl_error(req, "%d geom '%s'", error, pp->name);
1017 return (error);
1018 }
1019 table->gpt_opened = 1;
1020 } else {
1021 cp = LIST_FIRST(&gp->consumer);
1022 table->gpt_opened = null->gpt_opened;
1023 table->gpt_smhead = null->gpt_smhead;
1024 table->gpt_smtail = null->gpt_smtail;
1025 }
1026
1027 g_topology_unlock();
1028
1029 /* Make sure the provider has media. */
1030 if (pp->mediasize == 0 || pp->sectorsize == 0) {
1031 error = ENODEV;
1032 goto fail;
1033 }
1034
1035 /* Make sure we can nest and if so, determine our depth. */
1036 error = g_getattr("PART::isleaf", cp, &attr);
1037 if (!error && attr) {
1038 error = ENODEV;
1039 goto fail;
1040 }
1041 error = g_getattr("PART::depth", cp, &attr);
1042 table->gpt_depth = (!error) ? attr + 1 : 0;
1043
1044 /*
1045 * Synthesize a disk geometry. Some partitioning schemes
1046 * depend on it and since some file systems need it even
1047 * when the partition scheme doesn't, we do it here in
1048 * scheme-independent code.
1049 */
1050 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1051
1052 error = G_PART_CREATE(table, gpp);
1053 if (error)
1054 goto fail;
1055
1056 g_topology_lock();
1057
1058 table->gpt_created = 1;
1059 if (null != NULL)
1060 kobj_delete((kobj_t)null, M_GEOM);
1061
1062 /*
1063 * Support automatic commit by filling in the gpp_geom
1064 * parameter.
1065 */
1066 gpp->gpp_parms |= G_PART_PARM_GEOM;
1067 gpp->gpp_geom = gp;
1068
1069 /* Provide feedback if so requested. */
1070 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1071 sb = sbuf_new_auto();
1072 sbuf_printf(sb, "%s created\n", gp->name);
1073 sbuf_finish(sb);
1074 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1075 sbuf_delete(sb);
1076 }
1077 return (0);
1078
1079 fail:
1080 g_topology_lock();
1081 if (null == NULL) {
1082 g_access(cp, -1, -1, -1);
1083 g_part_wither(gp, error);
1084 } else {
1085 kobj_delete((kobj_t)gp->softc, M_GEOM);
1086 gp->softc = null;
1087 }
1088 gctl_error(req, "%d provider", error);
1089 return (error);
1090 }
1091
1092 static int
1093 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
1094 {
1095 struct g_geom *gp;
1096 struct g_provider *pp;
1097 struct g_part_entry *entry;
1098 struct g_part_table *table;
1099 struct sbuf *sb;
1100
1101 gp = gpp->gpp_geom;
1102 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1103 g_topology_assert();
1104
1105 table = gp->softc;
1106
1107 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1108 if (entry->gpe_deleted || entry->gpe_internal)
1109 continue;
1110 if (entry->gpe_index == gpp->gpp_index)
1111 break;
1112 }
1113 if (entry == NULL) {
1114 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1115 return (ENOENT);
1116 }
1117
1118 pp = entry->gpe_pp;
1119 if (pp != NULL) {
1120 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
1121 gctl_error(req, "%d", EBUSY);
1122 return (EBUSY);
1123 }
1124
1125 pp->private = NULL;
1126 entry->gpe_pp = NULL;
1127 }
1128
1129 if (pp != NULL)
1130 g_wither_provider(pp, ENXIO);
1131
1132 /* Provide feedback if so requested. */
1133 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1134 sb = sbuf_new_auto();
1135 G_PART_FULLNAME(table, entry, sb, gp->name);
1136 sbuf_cat(sb, " deleted\n");
1137 sbuf_finish(sb);
1138 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1139 sbuf_delete(sb);
1140 }
1141
1142 if (entry->gpe_created) {
1143 LIST_REMOVE(entry, gpe_entry);
1144 g_free(entry);
1145 } else {
1146 entry->gpe_modified = 0;
1147 entry->gpe_deleted = 1;
1148 }
1149 return (0);
1150 }
1151
1152 static int
1153 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
1154 {
1155 struct g_consumer *cp;
1156 struct g_geom *gp;
1157 struct g_provider *pp;
1158 struct g_part_entry *entry, *tmp;
1159 struct g_part_table *null, *table;
1160 struct sbuf *sb;
1161 int error;
1162
1163 gp = gpp->gpp_geom;
1164 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1165 g_topology_assert();
1166
1167 table = gp->softc;
1168 /* Check for busy providers. */
1169 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1170 if (entry->gpe_deleted || entry->gpe_internal)
1171 continue;
1172 if (gpp->gpp_force) {
1173 pp = entry->gpe_pp;
1174 if (pp == NULL)
1175 continue;
1176 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
1177 continue;
1178 }
1179 gctl_error(req, "%d", EBUSY);
1180 return (EBUSY);
1181 }
1182
1183 if (gpp->gpp_force) {
1184 /* Destroy all providers. */
1185 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1186 pp = entry->gpe_pp;
1187 if (pp != NULL) {
1188 pp->private = NULL;
1189 g_wither_provider(pp, ENXIO);
1190 }
1191 LIST_REMOVE(entry, gpe_entry);
1192 g_free(entry);
1193 }
1194 }
1195
1196 error = G_PART_DESTROY(table, gpp);
1197 if (error) {
1198 gctl_error(req, "%d", error);
1199 return (error);
1200 }
1201
1202 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
1203 M_WAITOK);
1204 null = gp->softc;
1205 null->gpt_gp = gp;
1206 null->gpt_scheme = &g_part_null_scheme;
1207 LIST_INIT(&null->gpt_entry);
1208
1209 cp = LIST_FIRST(&gp->consumer);
1210 pp = cp->provider;
1211 null->gpt_last = pp->mediasize / pp->sectorsize - 1;
1212
1213 null->gpt_depth = table->gpt_depth;
1214 null->gpt_opened = table->gpt_opened;
1215 null->gpt_smhead = table->gpt_smhead;
1216 null->gpt_smtail = table->gpt_smtail;
1217
1218 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1219 LIST_REMOVE(entry, gpe_entry);
1220 g_free(entry);
1221 }
1222 kobj_delete((kobj_t)table, M_GEOM);
1223
1224 /* Provide feedback if so requested. */
1225 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1226 sb = sbuf_new_auto();
1227 sbuf_printf(sb, "%s destroyed\n", gp->name);
1228 sbuf_finish(sb);
1229 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1230 sbuf_delete(sb);
1231 }
1232 return (0);
1233 }
1234
1235 static int
1236 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
1237 {
1238 struct g_geom *gp;
1239 struct g_part_entry *entry;
1240 struct g_part_table *table;
1241 struct sbuf *sb;
1242 int error;
1243
1244 gp = gpp->gpp_geom;
1245 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1246 g_topology_assert();
1247
1248 table = gp->softc;
1249
1250 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1251 if (entry->gpe_deleted || entry->gpe_internal)
1252 continue;
1253 if (entry->gpe_index == gpp->gpp_index)
1254 break;
1255 }
1256 if (entry == NULL) {
1257 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1258 return (ENOENT);
1259 }
1260
1261 error = G_PART_MODIFY(table, entry, gpp);
1262 if (error) {
1263 gctl_error(req, "%d", error);
1264 return (error);
1265 }
1266
1267 if (!entry->gpe_created)
1268 entry->gpe_modified = 1;
1269
1270 /* Provide feedback if so requested. */
1271 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1272 sb = sbuf_new_auto();
1273 G_PART_FULLNAME(table, entry, sb, gp->name);
1274 sbuf_cat(sb, " modified\n");
1275 sbuf_finish(sb);
1276 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1277 sbuf_delete(sb);
1278 }
1279 return (0);
1280 }
1281
1282 static int
1283 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
1284 {
1285 gctl_error(req, "%d verb 'move'", ENOSYS);
1286 return (ENOSYS);
1287 }
1288
1289 static int
1290 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
1291 {
1292 struct g_part_table *table;
1293 struct g_geom *gp;
1294 struct sbuf *sb;
1295 int error, recovered;
1296
1297 gp = gpp->gpp_geom;
1298 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1299 g_topology_assert();
1300 table = gp->softc;
1301 error = recovered = 0;
1302
1303 if (table->gpt_corrupt) {
1304 error = G_PART_RECOVER(table);
1305 if (error == 0)
1306 error = g_part_check_integrity(table,
1307 LIST_FIRST(&gp->consumer));
1308 if (error) {
1309 gctl_error(req, "%d recovering '%s' failed",
1310 error, gp->name);
1311 return (error);
1312 }
1313 recovered = 1;
1314 }
1315 /* Provide feedback if so requested. */
1316 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1317 sb = sbuf_new_auto();
1318 if (recovered)
1319 sbuf_printf(sb, "%s recovered\n", gp->name);
1320 else
1321 sbuf_printf(sb, "%s recovering is not needed\n",
1322 gp->name);
1323 sbuf_finish(sb);
1324 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1325 sbuf_delete(sb);
1326 }
1327 return (0);
1328 }
1329
1330 static int
1331 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
1332 {
1333 struct g_geom *gp;
1334 struct g_provider *pp;
1335 struct g_part_entry *pe, *entry;
1336 struct g_part_table *table;
1337 struct sbuf *sb;
1338 quad_t end;
1339 int error;
1340 off_t mediasize;
1341
1342 gp = gpp->gpp_geom;
1343 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1344 g_topology_assert();
1345 table = gp->softc;
1346
1347 /* check gpp_index */
1348 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1349 if (entry->gpe_deleted || entry->gpe_internal)
1350 continue;
1351 if (entry->gpe_index == gpp->gpp_index)
1352 break;
1353 }
1354 if (entry == NULL) {
1355 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1356 return (ENOENT);
1357 }
1358
1359 /* check gpp_size */
1360 end = entry->gpe_start + gpp->gpp_size - 1;
1361 if (gpp->gpp_size < 1 || end > table->gpt_last) {
1362 gctl_error(req, "%d size '%jd'", EINVAL,
1363 (intmax_t)gpp->gpp_size);
1364 return (EINVAL);
1365 }
1366
1367 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
1368 if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
1369 continue;
1370 if (end >= pe->gpe_start && end <= pe->gpe_end) {
1371 gctl_error(req, "%d end '%jd'", ENOSPC,
1372 (intmax_t)end);
1373 return (ENOSPC);
1374 }
1375 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
1376 gctl_error(req, "%d size '%jd'", ENOSPC,
1377 (intmax_t)gpp->gpp_size);
1378 return (ENOSPC);
1379 }
1380 }
1381
1382 pp = entry->gpe_pp;
1383 if ((g_debugflags & G_F_FOOTSHOOTING) == 0 &&
1384 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
1385 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
1386 /* Deny shrinking of an opened partition. */
1387 gctl_error(req, "%d", EBUSY);
1388 return (EBUSY);
1389 }
1390 }
1391
1392 error = G_PART_RESIZE(table, entry, gpp);
1393 if (error) {
1394 gctl_error(req, "%d%s", error, error != EBUSY ? "":
1395 " resizing will lead to unexpected shrinking"
1396 " due to alignment");
1397 return (error);
1398 }
1399
1400 if (!entry->gpe_created)
1401 entry->gpe_modified = 1;
1402
1403 /* update mediasize of changed provider */
1404 mediasize = (entry->gpe_end - entry->gpe_start + 1) *
1405 pp->sectorsize;
1406 g_resize_provider(pp, mediasize);
1407
1408 /* Provide feedback if so requested. */
1409 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1410 sb = sbuf_new_auto();
1411 G_PART_FULLNAME(table, entry, sb, gp->name);
1412 sbuf_cat(sb, " resized\n");
1413 sbuf_finish(sb);
1414 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1415 sbuf_delete(sb);
1416 }
1417 return (0);
1418 }
1419
1420 static int
1421 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1422 unsigned int set)
1423 {
1424 struct g_geom *gp;
1425 struct g_part_entry *entry;
1426 struct g_part_table *table;
1427 struct sbuf *sb;
1428 int error;
1429
1430 gp = gpp->gpp_geom;
1431 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1432 g_topology_assert();
1433
1434 table = gp->softc;
1435
1436 if (gpp->gpp_parms & G_PART_PARM_INDEX) {
1437 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1438 if (entry->gpe_deleted || entry->gpe_internal)
1439 continue;
1440 if (entry->gpe_index == gpp->gpp_index)
1441 break;
1442 }
1443 if (entry == NULL) {
1444 gctl_error(req, "%d index '%d'", ENOENT,
1445 gpp->gpp_index);
1446 return (ENOENT);
1447 }
1448 } else
1449 entry = NULL;
1450
1451 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1452 if (error) {
1453 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1454 return (error);
1455 }
1456
1457 /* Provide feedback if so requested. */
1458 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1459 sb = sbuf_new_auto();
1460 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1461 (set) ? "" : "un");
1462 if (entry)
1463 G_PART_FULLNAME(table, entry, sb, gp->name);
1464 else
1465 sbuf_cat(sb, gp->name);
1466 sbuf_cat(sb, "\n");
1467 sbuf_finish(sb);
1468 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1469 sbuf_delete(sb);
1470 }
1471 return (0);
1472 }
1473
1474 static int
1475 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
1476 {
1477 struct g_consumer *cp;
1478 struct g_provider *pp;
1479 struct g_geom *gp;
1480 struct g_part_entry *entry, *tmp;
1481 struct g_part_table *table;
1482 int error, reprobe;
1483
1484 gp = gpp->gpp_geom;
1485 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1486 g_topology_assert();
1487
1488 table = gp->softc;
1489 if (!table->gpt_opened) {
1490 gctl_error(req, "%d", EPERM);
1491 return (EPERM);
1492 }
1493
1494 cp = LIST_FIRST(&gp->consumer);
1495 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1496 entry->gpe_modified = 0;
1497 if (entry->gpe_created) {
1498 pp = entry->gpe_pp;
1499 if (pp != NULL) {
1500 pp->private = NULL;
1501 entry->gpe_pp = NULL;
1502 g_wither_provider(pp, ENXIO);
1503 }
1504 entry->gpe_deleted = 1;
1505 }
1506 if (entry->gpe_deleted) {
1507 LIST_REMOVE(entry, gpe_entry);
1508 g_free(entry);
1509 }
1510 }
1511
1512 g_topology_unlock();
1513
1514 reprobe = (table->gpt_scheme == &g_part_null_scheme ||
1515 table->gpt_created) ? 1 : 0;
1516
1517 if (reprobe) {
1518 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1519 if (entry->gpe_internal)
1520 continue;
1521 error = EBUSY;
1522 goto fail;
1523 }
1524 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1525 LIST_REMOVE(entry, gpe_entry);
1526 g_free(entry);
1527 }
1528 error = g_part_probe(gp, cp, table->gpt_depth);
1529 if (error) {
1530 g_topology_lock();
1531 g_access(cp, -1, -1, -1);
1532 g_part_wither(gp, error);
1533 return (0);
1534 }
1535 table = gp->softc;
1536
1537 /*
1538 * Synthesize a disk geometry. Some partitioning schemes
1539 * depend on it and since some file systems need it even
1540 * when the partition scheme doesn't, we do it here in
1541 * scheme-independent code.
1542 */
1543 pp = cp->provider;
1544 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1545 }
1546
1547 error = G_PART_READ(table, cp);
1548 if (error)
1549 goto fail;
1550 error = g_part_check_integrity(table, cp);
1551 if (error)
1552 goto fail;
1553
1554 g_topology_lock();
1555 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1556 if (!entry->gpe_internal)
1557 g_part_new_provider(gp, table, entry);
1558 }
1559
1560 table->gpt_opened = 0;
1561 g_access(cp, -1, -1, -1);
1562 return (0);
1563
1564 fail:
1565 g_topology_lock();
1566 gctl_error(req, "%d", error);
1567 return (error);
1568 }
1569
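/*
 * Tear down a partitioning geom: wither all entry providers, dispose of
 * the table and finally wither the geom itself.
 */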
1570 static void
1571 g_part_wither(struct g_geom *gp, int error)
1572 {
1573 struct g_part_entry *entry;
1574 struct g_part_table *table;
1575 struct g_provider *pp;
1576
1577 table = gp->softc;
1578 if (table != NULL) {
1579 gp->softc = NULL;
1580 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1581 LIST_REMOVE(entry, gpe_entry);
1582 pp = entry->gpe_pp;
1583 entry->gpe_pp = NULL;
1584 if (pp != NULL) {
1585 pp->private = NULL;
1586 g_wither_provider(pp, error);
1587 }
1588 g_free(entry);
1589 }
1590 G_PART_DESTROY(table, NULL);
1591 kobj_delete((kobj_t)table, M_GEOM);
1592 }
1593 g_wither_geom(gp, error);
1594 }
1595
1596 /*
1597 * Class methods.
1598 */
1599
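/*
 * Top-level handler for gpart(8) control requests: translate the verb
 * into a request type, collect and validate the mandatory and optional
 * parameters, open the consumer when the request modifies the table,
 * run the scheme's pre-check, dispatch to the per-verb handler and
 * honor the 'C' flag for automatic commit.
 */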
1600 static void
1601 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1602 {
1603 struct g_part_parms gpp;
1604 struct g_part_table *table;
1605 struct gctl_req_arg *ap;
1606 enum g_part_ctl ctlreq;
1607 unsigned int i, mparms, oparms, parm;
1608 int auto_commit, close_on_error;
1609 int error, modifies;
1610
1611 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1612 g_topology_assert();
1613
1614 ctlreq = G_PART_CTL_NONE;
1615 modifies = 1;
1616 mparms = 0;
1617 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1618 switch (*verb) {
1619 case 'a':
1620 if (!strcmp(verb, "add")) {
1621 ctlreq = G_PART_CTL_ADD;
1622 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1623 G_PART_PARM_START | G_PART_PARM_TYPE;
1624 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1625 }
1626 break;
1627 case 'b':
1628 if (!strcmp(verb, "bootcode")) {
1629 ctlreq = G_PART_CTL_BOOTCODE;
1630 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1631 oparms |= G_PART_PARM_SKIP_DSN;
1632 }
1633 break;
1634 case 'c':
1635 if (!strcmp(verb, "commit")) {
1636 ctlreq = G_PART_CTL_COMMIT;
1637 mparms |= G_PART_PARM_GEOM;
1638 modifies = 0;
1639 } else if (!strcmp(verb, "create")) {
1640 ctlreq = G_PART_CTL_CREATE;
1641 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1642 oparms |= G_PART_PARM_ENTRIES;
1643 }
1644 break;
1645 case 'd':
1646 if (!strcmp(verb, "delete")) {
1647 ctlreq = G_PART_CTL_DELETE;
1648 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1649 } else if (!strcmp(verb, "destroy")) {
1650 ctlreq = G_PART_CTL_DESTROY;
1651 mparms |= G_PART_PARM_GEOM;
1652 oparms |= G_PART_PARM_FORCE;
1653 }
1654 break;
1655 case 'm':
1656 if (!strcmp(verb, "modify")) {
1657 ctlreq = G_PART_CTL_MODIFY;
1658 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1659 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1660 } else if (!strcmp(verb, "move")) {
1661 ctlreq = G_PART_CTL_MOVE;
1662 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1663 }
1664 break;
1665 case 'r':
1666 if (!strcmp(verb, "recover")) {
1667 ctlreq = G_PART_CTL_RECOVER;
1668 mparms |= G_PART_PARM_GEOM;
1669 } else if (!strcmp(verb, "resize")) {
1670 ctlreq = G_PART_CTL_RESIZE;
1671 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
1672 G_PART_PARM_SIZE;
1673 }
1674 break;
1675 case 's':
1676 if (!strcmp(verb, "set")) {
1677 ctlreq = G_PART_CTL_SET;
1678 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1679 oparms |= G_PART_PARM_INDEX;
1680 }
1681 break;
1682 case 'u':
1683 if (!strcmp(verb, "undo")) {
1684 ctlreq = G_PART_CTL_UNDO;
1685 mparms |= G_PART_PARM_GEOM;
1686 modifies = 0;
1687 } else if (!strcmp(verb, "unset")) {
1688 ctlreq = G_PART_CTL_UNSET;
1689 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1690 oparms |= G_PART_PARM_INDEX;
1691 }
1692 break;
1693 }
1694 if (ctlreq == G_PART_CTL_NONE) {
1695 gctl_error(req, "%d verb '%s'", EINVAL, verb);
1696 return;
1697 }
1698
1699 bzero(&gpp, sizeof(gpp));
1700 for (i = 0; i < req->narg; i++) {
1701 ap = &req->arg[i];
1702 parm = 0;
1703 switch (ap->name[0]) {
1704 case 'a':
1705 if (!strcmp(ap->name, "arg0")) {
1706 parm = mparms &
1707 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
1708 }
1709 if (!strcmp(ap->name, "attrib"))
1710 parm = G_PART_PARM_ATTRIB;
1711 break;
1712 case 'b':
1713 if (!strcmp(ap->name, "bootcode"))
1714 parm = G_PART_PARM_BOOTCODE;
1715 break;
1716 case 'c':
1717 if (!strcmp(ap->name, "class"))
1718 continue;
1719 break;
1720 case 'e':
1721 if (!strcmp(ap->name, "entries"))
1722 parm = G_PART_PARM_ENTRIES;
1723 break;
1724 case 'f':
1725 if (!strcmp(ap->name, "flags"))
1726 parm = G_PART_PARM_FLAGS;
1727 else if (!strcmp(ap->name, "force"))
1728 parm = G_PART_PARM_FORCE;
1729 break;
1730 case 'i':
1731 if (!strcmp(ap->name, "index"))
1732 parm = G_PART_PARM_INDEX;
1733 break;
1734 case 'l':
1735 if (!strcmp(ap->name, "label"))
1736 parm = G_PART_PARM_LABEL;
1737 break;
1738 case 'o':
1739 if (!strcmp(ap->name, "output"))
1740 parm = G_PART_PARM_OUTPUT;
1741 break;
1742 case 's':
1743 if (!strcmp(ap->name, "scheme"))
1744 parm = G_PART_PARM_SCHEME;
1745 else if (!strcmp(ap->name, "size"))
1746 parm = G_PART_PARM_SIZE;
1747 else if (!strcmp(ap->name, "start"))
1748 parm = G_PART_PARM_START;
1749 else if (!strcmp(ap->name, "skip_dsn"))
1750 parm = G_PART_PARM_SKIP_DSN;
1751 break;
1752 case 't':
1753 if (!strcmp(ap->name, "type"))
1754 parm = G_PART_PARM_TYPE;
1755 break;
1756 case 'v':
1757 if (!strcmp(ap->name, "verb"))
1758 continue;
1759 else if (!strcmp(ap->name, "version"))
1760 parm = G_PART_PARM_VERSION;
1761 break;
1762 }
1763 if ((parm & (mparms | oparms)) == 0) {
1764 gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1765 return;
1766 }
1767 switch (parm) {
1768 case G_PART_PARM_ATTRIB:
1769 error = g_part_parm_str(req, ap->name,
1770 &gpp.gpp_attrib);
1771 break;
1772 case G_PART_PARM_BOOTCODE:
1773 error = g_part_parm_bootcode(req, ap->name,
1774 &gpp.gpp_codeptr, &gpp.gpp_codesize);
1775 break;
1776 case G_PART_PARM_ENTRIES:
1777 error = g_part_parm_intmax(req, ap->name,
1778 &gpp.gpp_entries);
1779 break;
1780 case G_PART_PARM_FLAGS:
1781 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
1782 break;
1783 case G_PART_PARM_FORCE:
1784 error = g_part_parm_uint32(req, ap->name,
1785 &gpp.gpp_force);
1786 break;
1787 case G_PART_PARM_GEOM:
1788 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
1789 break;
1790 case G_PART_PARM_INDEX:
1791 error = g_part_parm_intmax(req, ap->name,
1792 &gpp.gpp_index);
1793 break;
1794 case G_PART_PARM_LABEL:
1795 error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
1796 break;
1797 case G_PART_PARM_OUTPUT:
1798 error = 0; /* Write-only parameter */
1799 break;
1800 case G_PART_PARM_PROVIDER:
1801 error = g_part_parm_provider(req, ap->name,
1802 &gpp.gpp_provider);
1803 break;
1804 case G_PART_PARM_SCHEME:
1805 error = g_part_parm_scheme(req, ap->name,
1806 &gpp.gpp_scheme);
1807 break;
1808 case G_PART_PARM_SIZE:
1809 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
1810 break;
1811 case G_PART_PARM_SKIP_DSN:
1812 error = g_part_parm_uint32(req, ap->name,
1813 &gpp.gpp_skip_dsn);
1814 break;
1815 case G_PART_PARM_START:
1816 error = g_part_parm_quad(req, ap->name,
1817 &gpp.gpp_start);
1818 break;
1819 case G_PART_PARM_TYPE:
1820 error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
1821 break;
1822 case G_PART_PARM_VERSION:
1823 error = g_part_parm_uint32(req, ap->name,
1824 &gpp.gpp_version);
1825 break;
1826 default:
1827 error = EDOOFUS;
1828 gctl_error(req, "%d %s", error, ap->name);
1829 break;
1830 }
1831 if (error != 0) {
1832 if (error == ENOATTR) {
1833 gctl_error(req, "%d param '%s'", error,
1834 ap->name);
1835 }
1836 return;
1837 }
1838 gpp.gpp_parms |= parm;
1839 }
1840 if ((gpp.gpp_parms & mparms) != mparms) {
1841 parm = mparms - (gpp.gpp_parms & mparms);
1842 gctl_error(req, "%d param '%x'", ENOATTR, parm);
1843 return;
1844 }
1845
1846 /* Obtain permissions if possible/necessary. */
1847 close_on_error = 0;
1848 table = NULL;
1849 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1850 table = gpp.gpp_geom->softc;
1851 if (table != NULL && table->gpt_corrupt &&
1852 ctlreq != G_PART_CTL_DESTROY &&
1853 ctlreq != G_PART_CTL_RECOVER &&
1854 geom_part_check_integrity) {
1855 gctl_error(req, "%d table '%s' is corrupt",
1856 EPERM, gpp.gpp_geom->name);
1857 return;
1858 }
1859 if (table != NULL && !table->gpt_opened) {
1860 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1861 1, 1, 1);
1862 if (error) {
1863 gctl_error(req, "%d geom '%s'", error,
1864 gpp.gpp_geom->name);
1865 return;
1866 }
1867 table->gpt_opened = 1;
1868 close_on_error = 1;
1869 }
1870 }
1871
1872 /* Allow the scheme to check or modify the parameters. */
1873 if (table != NULL) {
1874 error = G_PART_PRECHECK(table, ctlreq, &gpp);
1875 if (error) {
1876 gctl_error(req, "%d pre-check failed", error);
1877 goto out;
1878 }
1879 } else
1880 error = EDOOFUS; /* Prevent bogus uninit. warning. */
1881
1882 switch (ctlreq) {
1883 case G_PART_CTL_NONE:
1884 panic("%s", __func__);
1885 case G_PART_CTL_ADD:
1886 error = g_part_ctl_add(req, &gpp);
1887 break;
1888 case G_PART_CTL_BOOTCODE:
1889 error = g_part_ctl_bootcode(req, &gpp);
1890 break;
1891 case G_PART_CTL_COMMIT:
1892 error = g_part_ctl_commit(req, &gpp);
1893 break;
1894 case G_PART_CTL_CREATE:
1895 error = g_part_ctl_create(req, &gpp);
1896 break;
1897 case G_PART_CTL_DELETE:
1898 error = g_part_ctl_delete(req, &gpp);
1899 break;
1900 case G_PART_CTL_DESTROY:
1901 error = g_part_ctl_destroy(req, &gpp);
1902 break;
1903 case G_PART_CTL_MODIFY:
1904 error = g_part_ctl_modify(req, &gpp);
1905 break;
1906 case G_PART_CTL_MOVE:
1907 error = g_part_ctl_move(req, &gpp);
1908 break;
1909 case G_PART_CTL_RECOVER:
1910 error = g_part_ctl_recover(req, &gpp);
1911 break;
1912 case G_PART_CTL_RESIZE:
1913 error = g_part_ctl_resize(req, &gpp);
1914 break;
1915 case G_PART_CTL_SET:
1916 error = g_part_ctl_setunset(req, &gpp, 1);
1917 break;
1918 case G_PART_CTL_UNDO:
1919 error = g_part_ctl_undo(req, &gpp);
1920 break;
1921 case G_PART_CTL_UNSET:
1922 error = g_part_ctl_setunset(req, &gpp, 0);
1923 break;
1924 }
1925
1926 /* Implement automatic commit. */
1927 if (!error) {
1928 auto_commit = (modifies &&
1929 (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1930 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1931 if (auto_commit) {
1932 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
1933 __func__));
1934 error = g_part_ctl_commit(req, &gpp);
1935 }
1936 }
1937
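/* If the request failed, drop the exclusive access obtained above. */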
1938 out:
1939 if (error && close_on_error) {
1940 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1941 table->gpt_opened = 0;
1942 }
1943 }
1944
1945 static int
1946 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1947 struct g_geom *gp)
1948 {
1949
1950 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1951 g_topology_assert();
1952
1953 g_part_wither(gp, EINVAL);
1954 return (0);
1955 }
1956
1957 static struct g_geom *
1958 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1959 {
1960 struct g_consumer *cp;
1961 struct g_geom *gp;
1962 struct g_part_entry *entry;
1963 struct g_part_table *table;
1964 struct root_hold_token *rht;
1965 int attr, depth;
1966 int error;
1967
1968 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1969 g_topology_assert();
1970
1971 /* Skip providers that are already open for writing. */
1972 if (pp->acw > 0)
1973 return (NULL);
1974
1975 /*
1976 * Create a GEOM with consumer and hook it up to the provider.
1977 * With that we become part of the topology. Obtain read access
1978 * to the provider.
1979 */
1980 gp = g_new_geom(mp, pp->name);
1981 cp = g_new_consumer(gp);
1982 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
1983 error = g_attach(cp, pp);
1984 if (error == 0)
1985 error = g_access(cp, 1, 0, 0);
1986 if (error != 0) {
1987 if (cp->provider)
1988 g_detach(cp);
1989 g_destroy_consumer(cp);
1990 g_destroy_geom(gp);
1991 return (NULL);
1992 }
1993
1994 rht = root_mount_hold(mp->name);
1995 g_topology_unlock();
1996
1997 /*
1998 * Short-circuit the whole probing galore when there's no
1999 * media present.
2000 */
2001 if (pp->mediasize == 0 || pp->sectorsize == 0) {
2002 error = ENODEV;
2003 goto fail;
2004 }
2005
2006 /* Make sure we can nest and if so, determine our depth. */
2007 error = g_getattr("PART::isleaf", cp, &attr);
2008 if (!error && attr) {
2009 error = ENODEV;
2010 goto fail;
2011 }
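/* The outermost table has depth 0; each nested table is one level deeper. */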
2012 error = g_getattr("PART::depth", cp, &attr);
2013 depth = (!error) ? attr + 1 : 0;
2014
2015 error = g_part_probe(gp, cp, depth);
2016 if (error)
2017 goto fail;
2018
2019 table = gp->softc;
2020
2021 /*
2022 * Synthesize a disk geometry. Some partitioning schemes
2023 * depend on it and since some file systems need it even
2024 * when the partition scheme doesn't, we do it here in
2025 * scheme-independent code.
2026 */
2027 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
2028
2029 error = G_PART_READ(table, cp);
2030 if (error)
2031 goto fail;
2032 error = g_part_check_integrity(table, cp);
2033 if (error)
2034 goto fail;
2035
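/* Create providers for all entries that are not internal to the scheme. */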
2036 g_topology_lock();
2037 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
2038 if (!entry->gpe_internal)
2039 g_part_new_provider(gp, table, entry);
2040 }
2041
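/* Probing succeeded: release the root mount hold and our temporary read access. */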
2042 root_mount_rel(rht);
2043 g_access(cp, -1, 0, 0);
2044 return (gp);
2045
2046 fail:
2047 g_topology_lock();
2048 root_mount_rel(rht);
2049 g_access(cp, -1, 0, 0);
2050 g_detach(cp);
2051 g_destroy_consumer(cp);
2052 g_destroy_geom(gp);
2053 return (NULL);
2054 }
2055
2056 /*
2057 * Geom methods.
2058 */
2059
2060 static int
2061 g_part_access(struct g_provider *pp, int dr, int dw, int de)
2062 {
2063 struct g_consumer *cp;
2064
2065 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
2066 dw, de));
2067
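/*
 * Example: opening a partition for writing (dr=0, dw=1, de=0) maps to
 * (0, 1, 1) on the underlying consumer, so the parent provider becomes
 * write-exclusive whenever any partition is open for writing.
 */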
2068 cp = LIST_FIRST(&pp->geom->consumer);
2069
2070 /* We always gain write-exclusive access. */
2071 return (g_access(cp, dr, dw, dw + de));
2072 }
2073
2074 static void
2075 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2076 struct g_consumer *cp, struct g_provider *pp)
2077 {
2078 char buf[64];
2079 struct g_part_entry *entry;
2080 struct g_part_table *table;
2081
2082 KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
2083 table = gp->softc;
2084
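/* A NULL indent selects the terse one-line (conftxt) format. */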
2085 if (indent == NULL) {
2086 KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
2087 entry = pp->private;
2088 if (entry == NULL)
2089 return;
2090 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
2091 (uintmax_t)entry->gpe_offset,
2092 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2093 /*
2094 * libdisk compatibility quirk - the scheme dumps the
2095 * slicer name and partition type in a way that is
2096 * compatible with libdisk. When libdisk is not used
2097 * anymore, this should go away.
2098 */
2099 G_PART_DUMPCONF(table, entry, sb, indent);
2100 } else if (cp != NULL) { /* Consumer configuration. */
2101 KASSERT(pp == NULL, ("%s", __func__));
2102 /* none */
2103 } else if (pp != NULL) { /* Provider configuration. */
2104 entry = pp->private;
2105 if (entry == NULL)
2106 return;
2107 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
2108 (uintmax_t)entry->gpe_start);
2109 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
2110 (uintmax_t)entry->gpe_end);
2111 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
2112 entry->gpe_index);
2113 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2114 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2115 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
2116 (uintmax_t)entry->gpe_offset);
2117 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
2118 (uintmax_t)pp->mediasize);
2119 G_PART_DUMPCONF(table, entry, sb, indent);
2120 } else { /* Geom configuration. */
2121 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
2122 table->gpt_scheme->name);
2123 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
2124 table->gpt_entries);
2125 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
2126 (uintmax_t)table->gpt_first);
2127 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
2128 (uintmax_t)table->gpt_last);
2129 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
2130 table->gpt_sectors);
2131 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
2132 table->gpt_heads);
2133 sbuf_printf(sb, "%s<state>%s</state>\n", indent,
2134 table->gpt_corrupt ? "CORRUPT": "OK");
2135 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
2136 table->gpt_opened ? "true": "false");
2137 G_PART_DUMPCONF(table, NULL, sb, indent);
2138 }
2139 }
2140
2141 /*-
2142 * This start routine is only called for non-trivial requests; all the
2143 * trivial ones are handled autonomously by the slice code.
2144 * For requests we handle here, we must call g_io_deliver() on the
2145 * bio, and return non-zero to indicate to the slice code that we did so.
2146 * This code executes in the "DOWN" I/O path, which means:
2147 *    * No sleeping.
2148 *    * Don't grab the topology lock.
2149 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data().
2150 */
2151 static int
2152 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
2153 {
2154 struct g_part_table *table;
2155
2156 table = pp->geom->softc;
2157 return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
2158 }
2159
2160 static void
2161 g_part_resize(struct g_consumer *cp)
2162 {
2163 struct g_part_table *table;
2164
2165 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2166 g_topology_assert();
2167
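/* Do nothing if automatic resizing has been disabled. */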
2168 if (auto_resize == 0)
2169 return;
2170
2171 table = cp->geom->softc;
2172 if (table->gpt_opened == 0) {
2173 if (g_access(cp, 1, 1, 1) != 0)
2174 return;
2175 table->gpt_opened = 1;
2176 }
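/* A NULL entry asks the scheme to resize the table itself to the new media size. */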
2177 if (G_PART_RESIZE(table, NULL, NULL) == 0)
2178 printf("GEOM_PART: %s was automatically resized.\n"
2179 " Use `gpart commit %s` to save changes or "
2180 "`gpart undo %s` to revert them.\n", cp->geom->name,
2181 cp->geom->name, cp->geom->name);
2182 if (g_part_check_integrity(table, cp) != 0) {
2183 g_access(cp, -1, -1, -1);
2184 table->gpt_opened = 0;
2185 g_part_wither(table->gpt_gp, ENXIO);
2186 }
2187 }
2188
2189 static void
2190 g_part_orphan(struct g_consumer *cp)
2191 {
2192 struct g_provider *pp;
2193 struct g_part_table *table;
2194
2195 pp = cp->provider;
2196 KASSERT(pp != NULL, ("%s", __func__));
2197 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
2198 g_topology_assert();
2199
2200 KASSERT(pp->error != 0, ("%s", __func__));
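/* Drop any exclusive access held for an open table before withering. */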
2201 table = cp->geom->softc;
2202 if (table != NULL && table->gpt_opened)
2203 g_access(cp, -1, -1, -1);
2204 g_part_wither(cp->geom, pp->error);
2205 }
2206
2207 static void
2208 g_part_spoiled(struct g_consumer *cp)
2209 {
2210
2211 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2212 g_topology_assert();
2213
2214 cp->flags |= G_CF_ORPHAN;
2215 g_part_wither(cp->geom, ENXIO);
2216 }
2217
2218 static void
2219 g_part_start(struct bio *bp)
2220 {
2221 struct bio *bp2;
2222 struct g_consumer *cp;
2223 struct g_geom *gp;
2224 struct g_part_entry *entry;
2225 struct g_part_table *table;
2226 struct g_kerneldump *gkd;
2227 struct g_provider *pp;
2228 void (*done_func)(struct bio *) = g_std_done;
2229 char buf[64];
2230
2231 biotrack(bp, __func__);
2232
2233 pp = bp->bio_to;
2234 gp = pp->geom;
2235 table = gp->softc;
2236 cp = LIST_FIRST(&gp->consumer);
2237
2238 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
2239 pp->name));
2240
2241 entry = pp->private;
2242 if (entry == NULL) {
2243 g_io_deliver(bp, ENXIO);
2244 return;
2245 }
2246
2247 switch(bp->bio_cmd) {
2248 case BIO_DELETE:
2249 case BIO_READ:
2250 case BIO_WRITE:
2251 if (bp->bio_offset >= pp->mediasize) {
2252 g_io_deliver(bp, EIO);
2253 return;
2254 }
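/*
 * Clone the request, clamp it to the partition boundary and shift
 * it into the parent provider's address space before passing it down.
 */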
2255 bp2 = g_clone_bio(bp);
2256 if (bp2 == NULL) {
2257 g_io_deliver(bp, ENOMEM);
2258 return;
2259 }
2260 if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
2261 bp2->bio_length = pp->mediasize - bp2->bio_offset;
2262 bp2->bio_done = g_std_done;
2263 bp2->bio_offset += entry->gpe_offset;
2264 g_io_request(bp2, cp);
2265 return;
2266 case BIO_SPEEDUP:
2267 case BIO_FLUSH:
2268 break;
2269 case BIO_GETATTR:
2270 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
2271 return;
2272 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
2273 return;
2274 /*
2275 * allow_nesting overrides "isleaf" to false _unless_ the
2276 * provider offset is zero, since otherwise we would recurse.
2277 */
2278 if (g_handleattr_int(bp, "PART::isleaf",
2279 table->gpt_isleaf &&
2280 (allow_nesting == 0 || entry->gpe_offset == 0)))
2281 return;
2282 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
2283 return;
2284 if (g_handleattr_str(bp, "PART::scheme",
2285 table->gpt_scheme->name))
2286 return;
2287 if (g_handleattr_str(bp, "PART::type",
2288 G_PART_TYPE(table, entry, buf, sizeof(buf))))
2289 return;
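/*
 * GEOM::physpath is answered by the parent; the completion routine
 * appends this partition's name to the returned path.
 */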
2290 if (!strcmp("GEOM::physpath", bp->bio_attribute)) {
2291 done_func = g_part_get_physpath_done;
2292 break;
2293 }
2294 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
2295 /*
2296 * Check that the partition is suitable for kernel
2297 * dumps. Typically only swap partitions should be
2298 * used. If the request comes from the nested scheme
2299 * we allow dumping there as well.
2300 */
2301 if ((bp->bio_from == NULL ||
2302 bp->bio_from->geom->class != &g_part_class) &&
2303 G_PART_DUMPTO(table, entry) == 0) {
2304 g_io_deliver(bp, ENODEV);
2305 printf("GEOM_PART: Partition '%s' not suitable"
2306 " for kernel dumps (wrong type?)\n",
2307 pp->name);
2308 return;
2309 }
2310 gkd = (struct g_kerneldump *)bp->bio_data;
2311 if (gkd->offset >= pp->mediasize) {
2312 g_io_deliver(bp, EIO);
2313 return;
2314 }
2315 if (gkd->offset + gkd->length > pp->mediasize)
2316 gkd->length = pp->mediasize - gkd->offset;
2317 gkd->offset += entry->gpe_offset;
2318 }
2319 break;
2320 default:
2321 g_io_deliver(bp, EOPNOTSUPP);
2322 return;
2323 }
2324
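/*
 * Pass anything not completed above (flushes, speedups, remaining
 * attributes) down to the parent, using the completion routine
 * selected earlier.
 */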
2325 bp2 = g_clone_bio(bp);
2326 if (bp2 == NULL) {
2327 g_io_deliver(bp, ENOMEM);
2328 return;
2329 }
2330 bp2->bio_done = done_func;
2331 g_io_request(bp2, cp);
2332 }
2333
2334 static void
2335 g_part_init(struct g_class *mp)
2336 {
2337
2338 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
2339 }
2340
2341 static void
2342 g_part_fini(struct g_class *mp)
2343 {
2344
2345 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
2346 }
2347
2348 static void
2349 g_part_unload_event(void *arg, int flag)
2350 {
2351 struct g_consumer *cp;
2352 struct g_geom *gp;
2353 struct g_provider *pp;
2354 struct g_part_scheme *scheme;
2355 struct g_part_table *table;
2356 uintptr_t *xchg;
2357 int acc, error;
2358
2359 if (flag == EV_CANCEL)
2360 return;
2361
2362 xchg = arg;
2363 error = 0;
2364 scheme = (void *)(*xchg);
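/* The argument is bidirectional: it carries the scheme in and the error code out. */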
2365
2366 g_topology_assert();
2367
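/*
 * Wither every idle geom that uses this scheme; if any of them is
 * still open, fail the unload with EBUSY.
 */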
2368 LIST_FOREACH(gp, &g_part_class.geom, geom) {
2369 table = gp->softc;
2370 if (table->gpt_scheme != scheme)
2371 continue;
2372
2373 acc = 0;
2374 LIST_FOREACH(pp, &gp->provider, provider)
2375 acc += pp->acr + pp->acw + pp->ace;
2376 LIST_FOREACH(cp, &gp->consumer, consumer)
2377 acc += cp->acr + cp->acw + cp->ace;
2378
2379 if (!acc)
2380 g_part_wither(gp, ENOSYS);
2381 else
2382 error = EBUSY;
2383 }
2384
2385 if (!error)
2386 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2387
2388 *xchg = error;
2389 }
2390
2391 int
2392 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
2393 {
2394 struct g_part_scheme *iter;
2395 uintptr_t arg;
2396 int error;
2397
2398 error = 0;
2399 switch (type) {
2400 case MOD_LOAD:
2401 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
2402 if (scheme == iter) {
2403 printf("GEOM_PART: scheme %s is already "
2404 "registered!\n", scheme->name);
2405 break;
2406 }
2407 }
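/*
 * If the scheme was not already registered, add it and retaste so
 * existing providers are probed with it.
 */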
2408 if (iter == NULL) {
2409 TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
2410 scheme_list);
2411 g_retaste(&g_part_class);
2412 }
2413 break;
2414 case MOD_UNLOAD:
2415 arg = (uintptr_t)scheme;
2416 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
2417 NULL);
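/* The unload event stores its result back into arg. */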
2418 if (error == 0)
2419 error = arg;
2420 break;
2421 default:
2422 error = EOPNOTSUPP;
2423 break;
2424 }
2425
2426 return (error);
2427 }
2428