/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
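
/*
 * Example (illustrative sketch, not called from this file): printing the
 * root vdev's state for an open handle 'zhp'.  The vs_state/vs_aux pair
 * comes from the ZPOOL_CONFIG_VDEV_STATS array, the same lookup
 * zpool_get_prop() performs for ZPOOL_PROP_HEALTH below:
 *
 *	nvlist_t *nvroot;
 *	vdev_stat_t *vs;
 *	uint_t c;
 *
 *	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
 *	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
 *	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
 *	    (uint64_t **)&vs, &c) == 0);
 *	(void) printf("%s\n", zpool_state_to_name(vs->vs_state, vs->vs_aux));
 */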

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
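
/*
 * Example usage (sketch; assumes an open handle 'zhp', error handling
 * elided):
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */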

/*
 * Check that the bootfs name is within the pool it is being set on.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
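
/*
 * For example, with pool "tank", bootfs_name_valid() accepts "tank" and
 * "tank/ROOT/fs", but rejects "rpool/ROOT/fs" (different pool) and
 * "tankfoo" (same prefix, but no '/' boundary after the pool name).
 */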

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool the
			 * property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
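
/*
 * Example usage (sketch): property name and value are plain strings, so
 * e.g. "autoexpand=on" follows the same path as "comment=...":
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */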

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zfs_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
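
/*
 * Example usage (sketch): querying the async_destroy feature; on success
 * 'buf' holds "disabled", "enabled", or "active":
 *
 *	char buf[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
 *	    sizeof (buf)) == 0)
 *		(void) printf("async_destroy: %s\n", buf);
 */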

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally setting an extended error
 * message on 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
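
/*
 * Example (sketch): "tank0" passes, while "mirror", "c0t0d0", and "1pool"
 * are rejected with an extended error left on 'hdl':
 *
 *	if (!zpool_name_valid(hdl, B_FALSE, "mirror"))
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */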

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
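
/*
 * Typical open/close lifecycle (sketch; assumes a pool named "tank"):
 *
 *	libzfs_handle_t *hdl;
 *	zpool_handle_t *zhp;
 *
 *	if ((hdl = libzfs_init()) == NULL)
 *		return;
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		... operate on the pool ...
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(hdl);
 */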

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
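
/*
 * Example usage (sketch): 'nvroot' is a vdev tree such as the one built
 * by the zpool(1M) command's make_root_vdev(); passing NULL for both
 * 'props' and 'fsprops' is the simplest case:
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */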

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}
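
/*
 * Example usage (sketch): 'config' typically comes from a
 * zpool_find_import() scan; NULL 'newname' keeps the original pool name,
 * and a non-NULL 'altroot' applies for the lifetime of the import:
 *
 *	if (zpool_import(hdl, config, NULL, "/a") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */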

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))
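
/*
 * For example, CTD_CHECK() accepts "c0t0d0" and "c1t2d0s0", but rejects
 * "sd0" and "/dev/dsk/c0t0d0"; paths like the latter are handled by
 * ctd_check_path() below.
 */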

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
2063
2064 /*
2065 * Given a physical path (minus the "/devices" prefix), find the
2066 * associated vdev.
2067 */
2068 nvlist_t *
2069 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2070 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2071 {
2072 nvlist_t *search, *nvroot, *ret;
2073
2074 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2075 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2076
2077 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2078 &nvroot) == 0);
2079
2080 *avail_spare = B_FALSE;
2081 *l2cache = B_FALSE;
2082 if (log != NULL)
2083 *log = B_FALSE;
2084 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2085 nvlist_free(search);
2086
2087 return (ret);
2088 }
2089
2090 /*
2091  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2092 */
2093 boolean_t
2094 zpool_vdev_is_interior(const char *name)
2095 {
2096 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2097 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2098 return (B_TRUE);
2099 return (B_FALSE);
2100 }
2101
2102 nvlist_t *
2103 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2104 boolean_t *l2cache, boolean_t *log)
2105 {
2106 char buf[MAXPATHLEN];
2107 char *end;
2108 nvlist_t *nvroot, *search, *ret;
2109 uint64_t guid;
2110
2111 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2112
2113 guid = strtoull(path, &end, 10);
2114 if (guid != 0 && *end == '\0') {
2115 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2116 } else if (zpool_vdev_is_interior(path)) {
2117 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2118 } else if (path[0] != '/') {
2119 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
2120 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2121 } else {
2122 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2123 }
2124
2125 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2126 &nvroot) == 0);
2127
2128 *avail_spare = B_FALSE;
2129 *l2cache = B_FALSE;
2130 if (log != NULL)
2131 *log = B_FALSE;
2132 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2133 nvlist_free(search);
2134
2135 return (ret);
2136 }
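/*
 * Illustrative sketch (not part of the library): resolving a vdev by the
 * three name forms zpool_find_vdev() accepts -- a numeric GUID, an interior
 * name such as "mirror-1", or a device name (short names are prefixed with
 * "/dev/dsk/" automatically).  Variable names here are hypothetical.
 *
 *	boolean_t spare, l2cache, islog;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "c0t1d0", &spare, &l2cache, &islog);
 *	if (tgt == NULL)
 *		... device is not part of this pool ...
 *	else if (spare || l2cache)
 *		... found among the spares or cache devices ...
 */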
2137
2138 static int
2139 vdev_online(nvlist_t *nv)
2140 {
2141 uint64_t ival;
2142
2143 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2144 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2145 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2146 return (0);
2147
2148 return (1);
2149 }
2150
2151 /*
2152 * Helper function for zpool_get_physpaths().
2153 */
2154 static int
2155 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2156 size_t *bytes_written)
2157 {
2158 size_t bytes_left, pos, rsz;
2159 char *tmppath;
2160 const char *format;
2161
2162 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2163 &tmppath) != 0)
2164 return (EZFS_NODEVICE);
2165
2166 pos = *bytes_written;
2167 bytes_left = physpath_size - pos;
2168 format = (pos == 0) ? "%s" : " %s";
2169
2170 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2171 *bytes_written += rsz;
2172
2173 if (rsz >= bytes_left) {
2174 /* if physpath was not copied properly, clear it */
2175 if (bytes_left != 0) {
2176 physpath[pos] = 0;
2177 }
2178 return (EZFS_NOSPC);
2179 }
2180 return (0);
2181 }
2182
2183 static int
2184 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2185 size_t *rsz, boolean_t is_spare)
2186 {
2187 char *type;
2188 int ret;
2189
2190 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2191 return (EZFS_INVALCONFIG);
2192
2193 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2194 /*
2195 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2196 * For a spare vdev, we only want to boot from the active
2197 * spare device.
2198 */
2199 if (is_spare) {
2200 uint64_t spare = 0;
2201 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2202 &spare);
2203 if (!spare)
2204 return (EZFS_INVALCONFIG);
2205 }
2206
2207 if (vdev_online(nv)) {
2208 if ((ret = vdev_get_one_physpath(nv, physpath,
2209 phypath_size, rsz)) != 0)
2210 return (ret);
2211 }
2212 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2213 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2214 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2215 nvlist_t **child;
2216 uint_t count;
2217 int i, ret;
2218
2219 if (nvlist_lookup_nvlist_array(nv,
2220 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2221 return (EZFS_INVALCONFIG);
2222
2223 for (i = 0; i < count; i++) {
2224 ret = vdev_get_physpaths(child[i], physpath,
2225 phypath_size, rsz, is_spare);
2226 if (ret == EZFS_NOSPC)
2227 return (ret);
2228 }
2229 }
2230
2231 return (EZFS_POOL_INVALARG);
2232 }
2233
2234 /*
2235 * Get phys_path for a root pool config.
2236 * Return 0 on success; non-zero on failure.
2237 */
2238 static int
2239 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2240 {
2241 size_t rsz;
2242 nvlist_t *vdev_root;
2243 nvlist_t **child;
2244 uint_t count;
2245 char *type;
2246
2247 rsz = 0;
2248
2249 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2250 &vdev_root) != 0)
2251 return (EZFS_INVALCONFIG);
2252
2253 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2254 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2255 &child, &count) != 0)
2256 return (EZFS_INVALCONFIG);
2257
2258 /*
2259 * root pool can only have a single top-level vdev.
2260 */
2261 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2262 return (EZFS_POOL_INVALARG);
2263
2264 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2265 B_FALSE);
2266
2267 /* No online devices */
2268 if (rsz == 0)
2269 return (EZFS_NODEVICE);
2270
2271 return (0);
2272 }
2273
2274 /*
2275 * Get phys_path for a root pool
2276 * Return 0 on success; non-zero on failure.
2277 */
2278 int
2279 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2280 {
2281 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2282 phypath_size));
2283 }
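/*
 * Illustrative sketch: callers (e.g. boot-related code) pass a flat character
 * buffer; on success it holds the space-separated phys_paths of all online
 * leaf vdevs.  The buffer size shown is an arbitrary choice.
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		... physpath holds e.g. "/pci@0,0/.../disk@0,0:a" ...
 */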
2284
2285 /*
2286  * If the device has been dynamically expanded then we need to relabel
2287 * the disk to use the new unallocated space.
2288 */
2289 static int
2290 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2291 {
2292 char path[MAXPATHLEN];
2293 char errbuf[1024];
2294 int fd, error;
2295 int (*_efi_use_whole_disk)(int);
2296
2297 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2298 "efi_use_whole_disk")) == NULL)
2299 return (-1);
2300
2301 	/* errbuf is passed to zfs_error() below; fill it in first */
 	(void) snprintf(errbuf, sizeof (errbuf),
 	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);
 	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2302
2303 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2304 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2305 "relabel '%s': unable to open device"), name);
2306 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2307 }
2308
2309 /*
2310 * It's possible that we might encounter an error if the device
2311 * does not have any unallocated space left. If so, we simply
2312 * ignore that error and continue on.
2313 */
2314 error = _efi_use_whole_disk(fd);
2315 (void) close(fd);
2316 if (error && error != VT_ENOSPC) {
2317 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2318 "relabel '%s': unable to read disk capacity"), name);
2319 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2320 }
2321 return (0);
2322 }
2323
2324 /*
2325 * Bring the specified vdev online. The 'flags' parameter is a set of the
2326 * ZFS_ONLINE_* flags.
2327 */
2328 int
2329 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2330 vdev_state_t *newstate)
2331 {
2332 zfs_cmd_t zc = { 0 };
2333 char msg[1024];
2334 nvlist_t *tgt;
2335 boolean_t avail_spare, l2cache, islog;
2336 libzfs_handle_t *hdl = zhp->zpool_hdl;
2337
2338 if (flags & ZFS_ONLINE_EXPAND) {
2339 (void) snprintf(msg, sizeof (msg),
2340 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2341 } else {
2342 (void) snprintf(msg, sizeof (msg),
2343 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2344 }
2345
2346 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2347 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2348 &islog)) == NULL)
2349 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2350
2351 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2352
2353 if (avail_spare)
2354 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2355
2356 if (flags & ZFS_ONLINE_EXPAND ||
2357 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2358 char *pathname = NULL;
2359 uint64_t wholedisk = 0;
2360
2361 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2362 &wholedisk);
2363 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2364 &pathname) == 0);
2365
2366 /*
2367 * XXX - L2ARC 1.0 devices can't support expansion.
2368 */
2369 if (l2cache) {
2370 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2371 "cannot expand cache devices"));
2372 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2373 }
2374
2375 if (wholedisk) {
2376 pathname += strlen(DISK_ROOT) + 1;
2377 (void) zpool_relabel_disk(hdl, pathname);
2378 }
2379 }
2380
2381 zc.zc_cookie = VDEV_STATE_ONLINE;
2382 zc.zc_obj = flags;
2383
2384 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2385 if (errno == EINVAL) {
2386 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2387 "from this pool into a new one. Use '%s' "
2388 "instead"), "zpool detach");
2389 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2390 }
2391 return (zpool_standard_error(hdl, errno, msg));
2392 }
2393
2394 *newstate = zc.zc_cookie;
2395 return (0);
2396 }
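/*
 * Illustrative sketch: onlining a device and requesting expansion in one
 * call.  ZFS_ONLINE_EXPAND makes a whole-disk device pick up newly grown
 * capacity via zpool_relabel_disk() above.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c0t1d0", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		... device is online with its new capacity ...
 */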
2397
2398 /*
2399 * Take the specified vdev offline
2400 */
2401 int
2402 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2403 {
2404 zfs_cmd_t zc = { 0 };
2405 char msg[1024];
2406 nvlist_t *tgt;
2407 boolean_t avail_spare, l2cache;
2408 libzfs_handle_t *hdl = zhp->zpool_hdl;
2409
2410 (void) snprintf(msg, sizeof (msg),
2411 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2412
2413 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2414 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2415 NULL)) == NULL)
2416 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2417
2418 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2419
2420 if (avail_spare)
2421 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2422
2423 zc.zc_cookie = VDEV_STATE_OFFLINE;
2424 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2425
2426 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2427 return (0);
2428
2429 switch (errno) {
2430 case EBUSY:
2431
2432 /*
2433 * There are no other replicas of this device.
2434 */
2435 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2436
2437 case EEXIST:
2438 /*
2439 * The log device has unplayed logs
2440 */
2441 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2442
2443 default:
2444 return (zpool_standard_error(hdl, errno, msg));
2445 }
2446 }
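/*
 * Illustrative sketch: a temporary offline (istmp == B_TRUE) does not
 * persist across reboot or import; this is how 'zpool offline -t' is
 * implemented on top of this function.
 *
 *	if (zpool_vdev_offline(zhp, "c0t1d0", B_TRUE) != 0)
 *		... EZFS_NOREPLICAS if this is the only copy of the data ...
 */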
2447
2448 /*
2449 * Mark the given vdev faulted.
2450 */
2451 int
2452 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2453 {
2454 zfs_cmd_t zc = { 0 };
2455 char msg[1024];
2456 libzfs_handle_t *hdl = zhp->zpool_hdl;
2457
2458 (void) snprintf(msg, sizeof (msg),
2459 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2460
2461 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2462 zc.zc_guid = guid;
2463 zc.zc_cookie = VDEV_STATE_FAULTED;
2464 zc.zc_obj = aux;
2465
2466 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2467 return (0);
2468
2469 switch (errno) {
2470 case EBUSY:
2471
2472 /*
2473 * There are no other replicas of this device.
2474 */
2475 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2476
2477 default:
2478 return (zpool_standard_error(hdl, errno, msg));
2479 }
2480
2481 }
2482
2483 /*
2484 * Mark the given vdev degraded.
2485 */
2486 int
2487 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2488 {
2489 zfs_cmd_t zc = { 0 };
2490 char msg[1024];
2491 libzfs_handle_t *hdl = zhp->zpool_hdl;
2492
2493 (void) snprintf(msg, sizeof (msg),
2494 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2495
2496 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2497 zc.zc_guid = guid;
2498 zc.zc_cookie = VDEV_STATE_DEGRADED;
2499 zc.zc_obj = aux;
2500
2501 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2502 return (0);
2503
2504 return (zpool_standard_error(hdl, errno, msg));
2505 }
2506
2507 /*
2508 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2509 * a hot spare.
2510 */
2511 static boolean_t
2512 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2513 {
2514 nvlist_t **child;
2515 uint_t c, children;
2516 char *type;
2517
2518 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2519 &children) == 0) {
2520 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2521 &type) == 0);
2522
2523 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2524 children == 2 && child[which] == tgt)
2525 return (B_TRUE);
2526
2527 for (c = 0; c < children; c++)
2528 if (is_replacing_spare(child[c], tgt, which))
2529 return (B_TRUE);
2530 }
2531
2532 return (B_FALSE);
2533 }
2534
2535 /*
2536 * Attach new_disk (fully described by nvroot) to old_disk.
2537 * If 'replacing' is specified, the new disk will replace the old one.
2538 */
2539 int
2540 zpool_vdev_attach(zpool_handle_t *zhp,
2541 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2542 {
2543 zfs_cmd_t zc = { 0 };
2544 char msg[1024];
2545 int ret;
2546 nvlist_t *tgt;
2547 boolean_t avail_spare, l2cache, islog;
2548 uint64_t val;
2549 char *newname;
2550 nvlist_t **child;
2551 uint_t children;
2552 nvlist_t *config_root;
2553 libzfs_handle_t *hdl = zhp->zpool_hdl;
2554 boolean_t rootpool = zpool_is_bootable(zhp);
2555
2556 if (replacing)
2557 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2558 "cannot replace %s with %s"), old_disk, new_disk);
2559 else
2560 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2561 "cannot attach %s to %s"), new_disk, old_disk);
2562
2563 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2564 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2565 &islog)) == NULL)
2566 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2567
2568 if (avail_spare)
2569 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2570
2571 if (l2cache)
2572 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2573
2574 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2575 zc.zc_cookie = replacing;
2576
2577 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2578 &child, &children) != 0 || children != 1) {
2579 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2580 "new device must be a single disk"));
2581 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2582 }
2583
2584 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2585 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2586
2587 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2588 return (-1);
2589
2590 /*
2591 * If the target is a hot spare that has been swapped in, we can only
2592 * replace it with another hot spare.
2593 */
2594 if (replacing &&
2595 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2596 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2597 NULL) == NULL || !avail_spare) &&
2598 is_replacing_spare(config_root, tgt, 1)) {
2599 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2600 "can only be replaced by another hot spare"));
2601 free(newname);
2602 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2603 }
2604
2605 free(newname);
2606
2607 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2608 return (-1);
2609
2610 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2611
2612 zcmd_free_nvlists(&zc);
2613
2614 if (ret == 0) {
2615 if (rootpool) {
2616 /*
2617 * XXX need a better way to prevent user from
2618 * booting up a half-baked vdev.
2619 */
2620 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2621 "sure to wait until resilver is done "
2622 "before rebooting.\n"));
2623 }
2624 return (0);
2625 }
2626
2627 switch (errno) {
2628 case ENOTSUP:
2629 /*
2630 * Can't attach to or replace this type of vdev.
2631 */
2632 if (replacing) {
2633 uint64_t version = zpool_get_prop_int(zhp,
2634 ZPOOL_PROP_VERSION, NULL);
2635
2636 if (islog)
2637 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2638 "cannot replace a log with a spare"));
2639 else if (version >= SPA_VERSION_MULTI_REPLACE)
2640 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2641 "already in replacing/spare config; wait "
2642 "for completion or use 'zpool detach'"));
2643 else
2644 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2645 "cannot replace a replacing device"));
2646 } else {
2647 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2648 "can only attach to mirrors and top-level "
2649 "disks"));
2650 }
2651 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2652 break;
2653
2654 case EINVAL:
2655 /*
2656 * The new device must be a single disk.
2657 */
2658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2659 "new device must be a single disk"));
2660 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2661 break;
2662
2663 case EBUSY:
2664 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2665 new_disk);
2666 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2667 break;
2668
2669 case EOVERFLOW:
2670 /*
2671 * The new device is too small.
2672 */
2673 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2674 "device is too small"));
2675 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2676 break;
2677
2678 case EDOM:
2679 /*
2680 * The new device has a different alignment requirement.
2681 */
2682 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2683 "devices have different sector alignment"));
2684 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2685 break;
2686
2687 case ENAMETOOLONG:
2688 /*
2689 * The resulting top-level vdev spec won't fit in the label.
2690 */
2691 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2692 break;
2693
2694 default:
2695 (void) zpool_standard_error(hdl, errno, msg);
2696 }
2697
2698 return (-1);
2699 }
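/*
 * Illustrative sketch: the nvroot argument is a root vdev tree holding
 * exactly one leaf child (the check above enforces children == 1).  A
 * minimal hand-built tree, assuming the leaf nvlist 'disk' was created
 * elsewhere (e.g. by the zpool command's vdev parser):
 *
 *	nvlist_t *nvroot;
 *
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	(void) zpool_vdev_attach(zhp, "c0t1d0", "c0t2d0", nvroot, 1);
 *	nvlist_free(nvroot);
 */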
2700
2701 /*
2702 * Detach the specified device.
2703 */
2704 int
2705 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2706 {
2707 zfs_cmd_t zc = { 0 };
2708 char msg[1024];
2709 nvlist_t *tgt;
2710 boolean_t avail_spare, l2cache;
2711 libzfs_handle_t *hdl = zhp->zpool_hdl;
2712
2713 (void) snprintf(msg, sizeof (msg),
2714 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2715
2716 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2717 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2718 NULL)) == NULL)
2719 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2720
2721 if (avail_spare)
2722 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2723
2724 if (l2cache)
2725 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2726
2727 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2728
2729 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2730 return (0);
2731
2732 switch (errno) {
2733
2734 case ENOTSUP:
2735 /*
2736 * Can't detach from this type of vdev.
2737 */
2738 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2739 "applicable to mirror and replacing vdevs"));
2740 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2741 break;
2742
2743 case EBUSY:
2744 /*
2745 * There are no other replicas of this device.
2746 */
2747 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2748 break;
2749
2750 default:
2751 (void) zpool_standard_error(hdl, errno, msg);
2752 }
2753
2754 return (-1);
2755 }
2756
2757 /*
2758 * Find a mirror vdev in the source nvlist.
2759 *
2760 * The mchild array contains a list of disks in one of the top-level mirrors
2761 * of the source pool. The schild array contains a list of disks that the
2762 * user specified on the command line. We loop over the mchild array to
2763 * see if any entry in the schild array matches.
2764 *
2765 * If a disk in the mchild array is found in the schild array, we return
2766 * the index of that entry. Otherwise we return -1.
2767 */
2768 static int
2769 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2770 nvlist_t **schild, uint_t schildren)
2771 {
2772 uint_t mc;
2773
2774 for (mc = 0; mc < mchildren; mc++) {
2775 uint_t sc;
2776 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2777 mchild[mc], B_FALSE);
2778
2779 for (sc = 0; sc < schildren; sc++) {
2780 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2781 schild[sc], B_FALSE);
2782 boolean_t result = (strcmp(mpath, spath) == 0);
2783
2784 free(spath);
2785 if (result) {
2786 free(mpath);
2787 return (mc);
2788 }
2789 }
2790
2791 free(mpath);
2792 }
2793
2794 return (-1);
2795 }
2796
2797 /*
2798  * Split a mirror pool. If newroot points to NULL, then a new nvlist
2799 * is generated and it is the responsibility of the caller to free it.
2800 */
2801 int
2802 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2803 nvlist_t *props, splitflags_t flags)
2804 {
2805 zfs_cmd_t zc = { 0 };
2806 char msg[1024];
2807 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2808 nvlist_t **varray = NULL, *zc_props = NULL;
2809 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2810 libzfs_handle_t *hdl = zhp->zpool_hdl;
2811 uint64_t vers;
2812 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2813 int retval = 0;
2814
2815 (void) snprintf(msg, sizeof (msg),
2816 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2817
2818 if (!zpool_name_valid(hdl, B_FALSE, newname))
2819 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2820
2821 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2822 (void) fprintf(stderr, gettext("Internal error: unable to "
2823 "retrieve pool configuration\n"));
2824 return (-1);
2825 }
2826
2827 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2828 == 0);
2829 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2830
2831 if (props) {
2832 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2833 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2834 props, vers, flags, msg)) == NULL)
2835 return (-1);
2836 }
2837
2838 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2839 &children) != 0) {
2840 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2841 "Source pool is missing vdev tree"));
2842 if (zc_props)
2843 nvlist_free(zc_props);
2844 return (-1);
2845 }
2846
2847 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2848 vcount = 0;
2849
2850 if (*newroot == NULL ||
2851 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2852 &newchild, &newchildren) != 0)
2853 newchildren = 0;
2854
2855 for (c = 0; c < children; c++) {
2856 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2857 char *type;
2858 nvlist_t **mchild, *vdev;
2859 uint_t mchildren;
2860 int entry;
2861
2862 /*
2863 * Unlike cache & spares, slogs are stored in the
2864 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2865 */
2866 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2867 &is_log);
2868 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2869 &is_hole);
2870 if (is_log || is_hole) {
2871 /*
2872 * Create a hole vdev and put it in the config.
2873 */
2874 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2875 goto out;
2876 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2877 VDEV_TYPE_HOLE) != 0)
2878 goto out;
2879 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2880 1) != 0)
2881 goto out;
2882 if (lastlog == 0)
2883 lastlog = vcount;
2884 varray[vcount++] = vdev;
2885 continue;
2886 }
2887 lastlog = 0;
2888 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2889 == 0);
2890 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2891 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2892 "Source pool must be composed only of mirrors\n"));
2893 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2894 goto out;
2895 }
2896
2897 verify(nvlist_lookup_nvlist_array(child[c],
2898 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2899
2900 /* find or add an entry for this top-level vdev */
2901 if (newchildren > 0 &&
2902 (entry = find_vdev_entry(zhp, mchild, mchildren,
2903 newchild, newchildren)) >= 0) {
2904 /* We found a disk that the user specified. */
2905 vdev = mchild[entry];
2906 ++found;
2907 } else {
2908 /* User didn't specify a disk for this vdev. */
2909 vdev = mchild[mchildren - 1];
2910 }
2911
2912 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2913 goto out;
2914 }
2915
2916 /* did we find every disk the user specified? */
2917 if (found != newchildren) {
2918 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2919 "include at most one disk from each mirror"));
2920 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2921 goto out;
2922 }
2923
2924 /* Prepare the nvlist for populating. */
2925 if (*newroot == NULL) {
2926 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2927 goto out;
2928 freelist = B_TRUE;
2929 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2930 VDEV_TYPE_ROOT) != 0)
2931 goto out;
2932 } else {
2933 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2934 }
2935
2936 /* Add all the children we found */
2937 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2938 lastlog == 0 ? vcount : lastlog) != 0)
2939 goto out;
2940
2941 /*
2942 * If we're just doing a dry run, exit now with success.
2943 */
2944 if (flags.dryrun) {
2945 memory_err = B_FALSE;
2946 freelist = B_FALSE;
2947 goto out;
2948 }
2949
2950 /* now build up the config list & call the ioctl */
2951 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2952 goto out;
2953
2954 if (nvlist_add_nvlist(newconfig,
2955 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2956 nvlist_add_string(newconfig,
2957 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2958 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2959 goto out;
2960
2961 /*
2962 * The new pool is automatically part of the namespace unless we
2963 * explicitly export it.
2964 */
2965 if (!flags.import)
2966 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2967 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2968 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2969 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2970 goto out;
2971 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2972 goto out;
2973
2974 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2975 retval = zpool_standard_error(hdl, errno, msg);
2976 goto out;
2977 }
2978
2979 freelist = B_FALSE;
2980 memory_err = B_FALSE;
2981
2982 out:
2983 if (varray != NULL) {
2984 int v;
2985
2986 for (v = 0; v < vcount; v++)
2987 nvlist_free(varray[v]);
2988 free(varray);
2989 }
2990 zcmd_free_nvlists(&zc);
2991 if (zc_props)
2992 nvlist_free(zc_props);
2993 if (newconfig)
2994 nvlist_free(newconfig);
2995 if (freelist) {
2996 nvlist_free(*newroot);
2997 *newroot = NULL;
2998 }
2999
3000 if (retval != 0)
3001 return (retval);
3002
3003 if (memory_err)
3004 return (no_memory(hdl));
3005
3006 return (0);
3007 }
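/*
 * Illustrative sketch: a dry run validates the split and hands back the
 * would-be vdev tree without touching the pool; the caller owns and must
 * free *newroot.  Field names below mirror splitflags_t.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		... inspect newroot ...
 *		nvlist_free(newroot);
 *	}
 */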
3008
3009 /*
3010 * Remove the given device. Currently, this is supported only for hot spares
3011 * and level 2 cache devices.
3012 */
3013 int
3014 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3015 {
3016 zfs_cmd_t zc = { 0 };
3017 char msg[1024];
3018 nvlist_t *tgt;
3019 boolean_t avail_spare, l2cache, islog;
3020 libzfs_handle_t *hdl = zhp->zpool_hdl;
3021 uint64_t version;
3022
3023 (void) snprintf(msg, sizeof (msg),
3024 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3025
3026 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3027 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3028 &islog)) == NULL)
3029 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3030 /*
3031 * XXX - this should just go away.
3032 */
3033 if (!avail_spare && !l2cache && !islog) {
3034 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3035 "only inactive hot spares, cache, top-level, "
3036 "or log devices can be removed"));
3037 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3038 }
3039
3040 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3041 if (islog && version < SPA_VERSION_HOLES) {
3042 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3043 "pool must be upgraded to support log removal"));
3044 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3045 }
3046
3047 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3048
3049 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3050 return (0);
3051
3052 return (zpool_standard_error(hdl, errno, msg));
3053 }
3054
3055 /*
3056 * Clear the errors for the pool, or the particular device if specified.
3057 */
3058 int
3059 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3060 {
3061 zfs_cmd_t zc = { 0 };
3062 char msg[1024];
3063 nvlist_t *tgt;
3064 zpool_rewind_policy_t policy;
3065 boolean_t avail_spare, l2cache;
3066 libzfs_handle_t *hdl = zhp->zpool_hdl;
3067 nvlist_t *nvi = NULL;
3068 int error;
3069
3070 if (path)
3071 (void) snprintf(msg, sizeof (msg),
3072 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3073 path);
3074 else
3075 (void) snprintf(msg, sizeof (msg),
3076 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3077 zhp->zpool_name);
3078
3079 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3080 if (path) {
3081 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3082 &l2cache, NULL)) == NULL)
3083 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3084
3085 /*
3086 * Don't allow error clearing for hot spares. Do allow
3087 * error clearing for l2cache devices.
3088 */
3089 if (avail_spare)
3090 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3091
3092 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3093 &zc.zc_guid) == 0);
3094 }
3095
3096 zpool_get_rewind_policy(rewindnvl, &policy);
3097 zc.zc_cookie = policy.zrp_request;
3098
3099 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3100 return (-1);
3101
3102 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3103 return (-1);
3104
3105 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3106 errno == ENOMEM) {
3107 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3108 zcmd_free_nvlists(&zc);
3109 return (-1);
3110 }
3111 }
3112
3113 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3114 errno != EPERM && errno != EACCES)) {
3115 if (policy.zrp_request &
3116 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3117 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3118 zpool_rewind_exclaim(hdl, zc.zc_name,
3119 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3120 nvi);
3121 nvlist_free(nvi);
3122 }
3123 zcmd_free_nvlists(&zc);
3124 return (0);
3125 }
3126
3127 zcmd_free_nvlists(&zc);
3128 return (zpool_standard_error(hdl, errno, msg));
3129 }
3130
3131 /*
3132 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3133 */
3134 int
3135 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3136 {
3137 zfs_cmd_t zc = { 0 };
3138 char msg[1024];
3139 libzfs_handle_t *hdl = zhp->zpool_hdl;
3140
3141 (void) snprintf(msg, sizeof (msg),
3142 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3143 guid);
3144
3145 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3146 zc.zc_guid = guid;
3147 zc.zc_cookie = ZPOOL_NO_REWIND;
3148
3149 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3150 return (0);
3151
3152 return (zpool_standard_error(hdl, errno, msg));
3153 }
3154
3155 /*
3156 * Change the GUID for a pool.
3157 */
3158 int
3159 zpool_reguid(zpool_handle_t *zhp)
3160 {
3161 char msg[1024];
3162 libzfs_handle_t *hdl = zhp->zpool_hdl;
3163 zfs_cmd_t zc = { 0 };
3164
3165 (void) snprintf(msg, sizeof (msg),
3166 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3167
3168 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3169 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3170 return (0);
3171
3172 return (zpool_standard_error(hdl, errno, msg));
3173 }
3174
3175 /*
3176 * Reopen the pool.
3177 */
3178 int
3179 zpool_reopen(zpool_handle_t *zhp)
3180 {
3181 zfs_cmd_t zc = { 0 };
3182 char msg[1024];
3183 libzfs_handle_t *hdl = zhp->zpool_hdl;
3184
3185 (void) snprintf(msg, sizeof (msg),
3186 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3187 zhp->zpool_name);
3188
3189 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3190 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3191 return (0);
3192 return (zpool_standard_error(hdl, errno, msg));
3193 }
3194
3195 /*
3196 * Convert from a devid string to a path.
3197 */
3198 static char *
3199 devid_to_path(char *devid_str)
3200 {
3201 ddi_devid_t devid;
3202 char *minor;
3203 char *path;
3204 devid_nmlist_t *list = NULL;
3205 int ret;
3206
3207 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3208 return (NULL);
3209
3210 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3211
3212 devid_str_free(minor);
3213 devid_free(devid);
3214
3215 if (ret != 0)
3216 return (NULL);
3217
3218 /*
3219  * If strdup() fails, we will just return NULL below.
3220 */
3221 path = strdup(list[0].devname);
3222
3223 devid_free_nmlist(list);
3224
3225 return (path);
3226 }
3227
3228 /*
3229 * Convert from a path to a devid string.
3230 */
3231 static char *
3232 path_to_devid(const char *path)
3233 {
3234 int fd;
3235 ddi_devid_t devid;
3236 char *minor, *ret;
3237
3238 if ((fd = open(path, O_RDONLY)) < 0)
3239 return (NULL);
3240
3241 minor = NULL;
3242 ret = NULL;
3243 if (devid_get(fd, &devid) == 0) {
3244 if (devid_get_minor_name(fd, &minor) == 0)
3245 ret = devid_str_encode(devid, minor);
3246 if (minor != NULL)
3247 devid_str_free(minor);
3248 devid_free(devid);
3249 }
3250 (void) close(fd);
3251
3252 return (ret);
3253 }
3254
3255 /*
3256 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3257 * ignore any failure here, since a common case is for an unprivileged user to
3258 * type 'zpool status', and we'll display the correct information anyway.
3259 */
3260 static void
3261 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3262 {
3263 zfs_cmd_t zc = { 0 };
3264
3265 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3266 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3267 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3268 &zc.zc_guid) == 0);
3269
3270 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3271 }
3272
3273 /*
3274 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3275 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3276 * We also check if this is a whole disk, in which case we strip off the
3277 * trailing 's0' slice name.
3278 *
3279 * This routine is also responsible for identifying when disks have been
3280 * reconfigured in a new location. The kernel will have opened the device by
3281 * devid, but the path will still refer to the old location. To catch this, we
3282 * first do a path -> devid translation (which is fast for the common case). If
3283 * the devid matches, we're done. If not, we do a reverse devid -> path
3284 * translation and issue the appropriate ioctl() to update the path of the vdev.
3285 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3286 * of these checks.
3287 */
3288 char *
3289 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3290 boolean_t verbose)
3291 {
3292 char *path, *devid;
3293 uint64_t value;
3294 char buf[64];
3295 vdev_stat_t *vs;
3296 uint_t vsc;
3297
3298 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3299 &value) == 0) {
3300 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3301 &value) == 0);
3302 (void) snprintf(buf, sizeof (buf), "%llu",
3303 (u_longlong_t)value);
3304 path = buf;
3305 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3306
3307 /*
3308 * If the device is dead (faulted, offline, etc) then don't
3309 * bother opening it. Otherwise we may be forcing the user to
3310 * open a misbehaving device, which can have undesirable
3311 * effects.
3312 */
3313 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3314 (uint64_t **)&vs, &vsc) != 0 ||
3315 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3316 zhp != NULL &&
3317 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3318 /*
3319 * Determine if the current path is correct.
3320 */
3321 char *newdevid = path_to_devid(path);
3322
3323 if (newdevid == NULL ||
3324 strcmp(devid, newdevid) != 0) {
3325 char *newpath;
3326
3327 if ((newpath = devid_to_path(devid)) != NULL) {
3328 /*
3329 * Update the path appropriately.
3330 */
3331 set_path(zhp, nv, newpath);
3332 if (nvlist_add_string(nv,
3333 ZPOOL_CONFIG_PATH, newpath) == 0)
3334 verify(nvlist_lookup_string(nv,
3335 ZPOOL_CONFIG_PATH,
3336 &path) == 0);
3337 free(newpath);
3338 }
3339 }
3340
3341 if (newdevid)
3342 devid_str_free(newdevid);
3343 }
3344
3345 if (strncmp(path, "/dev/dsk/", 9) == 0)
3346 path += 9;
3347
3348 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3349 &value) == 0 && value) {
3350 int pathlen = strlen(path);
3351 char *tmp = zfs_strdup(hdl, path);
3352
3353 /*
3354 * If it starts with c#, and ends with "s0", chop
3355 * the "s0" off, or if it ends with "s0/old", remove
3356 * the "s0" from the middle.
3357 */
3358 if (CTD_CHECK(tmp)) {
3359 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3360 tmp[pathlen - 2] = '\0';
3361 } else if (pathlen > 6 &&
3362 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3363 (void) strcpy(&tmp[pathlen - 6],
3364 "/old");
3365 }
3366 }
3367 return (tmp);
3368 }
3369 } else {
3370 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3371
3372 /*
3373 * If it's a raidz device, we need to stick in the parity level.
3374 */
3375 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3376 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3377 &value) == 0);
3378 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3379 (u_longlong_t)value);
3380 path = buf;
3381 }
3382
3383 /*
3384 * We identify each top-level vdev by using a <type-id>
3385 * naming convention.
3386 */
3387 if (verbose) {
3388 uint64_t id;
3389
3390 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3391 &id) == 0);
3392 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3393 (u_longlong_t)id);
3394 path = buf;
3395 }
3396 }
3397
3398 return (zfs_strdup(hdl, path));
3399 }
3400
3401 static int
3402 zbookmark_mem_compare(const void *a, const void *b)
3403 {
3404 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3405 }
3406
3407 /*
3408 * Retrieve the persistent error log, uniquify the members, and return to the
3409 * caller.
3410 */
3411 int
3412 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3413 {
3414 zfs_cmd_t zc = { 0 };
3415 uint64_t count;
3416 zbookmark_phys_t *zb = NULL;
3417 int i;
3418
3419 /*
3420 * Retrieve the raw error list from the kernel. If the number of errors
3421 * has increased, allocate more space and continue until we get the
3422 * entire list.
3423 */
3424 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3425 &count) == 0);
3426 if (count == 0)
3427 return (0);
3428 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3429 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3430 return (-1);
3431 zc.zc_nvlist_dst_size = count;
3432 (void) strcpy(zc.zc_name, zhp->zpool_name);
3433 for (;;) {
3434 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3435 &zc) != 0) {
3436 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3437 if (errno == ENOMEM) {
3438 void *dst;
3439
3440 count = zc.zc_nvlist_dst_size;
3441 dst = zfs_alloc(zhp->zpool_hdl, count *
3442 sizeof (zbookmark_phys_t));
3443 if (dst == NULL)
3444 return (-1);
3445 zc.zc_nvlist_dst = (uintptr_t)dst;
3446 } else {
3447 return (-1);
3448 }
3449 } else {
3450 break;
3451 }
3452 }
3453
3454 /*
3455 * Sort the resulting bookmarks. This is a little confusing due to the
3456 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3457  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3458  * _not_ copied as part of the process. So we point the start of our
3459  * array appropriately and decrement the total number of elements.
3460 */
3461 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3462 zc.zc_nvlist_dst_size;
3463 count -= zc.zc_nvlist_dst_size;
3464
3465 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3466
3467 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3468
3469 /*
3470 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3471 */
3472 for (i = 0; i < count; i++) {
3473 nvlist_t *nv;
3474
3475 /* ignoring zb_blkid and zb_level for now */
3476 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3477 zb[i-1].zb_object == zb[i].zb_object)
3478 continue;
3479
3480 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3481 goto nomem;
3482 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3483 zb[i].zb_objset) != 0) {
3484 nvlist_free(nv);
3485 goto nomem;
3486 }
3487 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3488 zb[i].zb_object) != 0) {
3489 nvlist_free(nv);
3490 goto nomem;
3491 }
3492 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3493 nvlist_free(nv);
3494 goto nomem;
3495 }
3496 nvlist_free(nv);
3497 }
3498
3499 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3500 return (0);
3501
3502 nomem:
3503 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3504 return (no_memory(zhp->zpool_hdl));
3505 }
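/*
 * Illustrative sketch: walking the error list returned above.  Each member
 * (all share the literal name "ejk") carries a dataset and object number,
 * which zpool_obj_to_path() below can turn into a user-visible path.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *			char path[MAXPATHLEN];
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, path,
 *			    sizeof (path));
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */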
3506
3507 /*
3508 * Upgrade a ZFS pool to the latest on-disk version.
3509 */
3510 int
3511 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3512 {
3513 zfs_cmd_t zc = { 0 };
3514 libzfs_handle_t *hdl = zhp->zpool_hdl;
3515
3516 (void) strcpy(zc.zc_name, zhp->zpool_name);
3517 zc.zc_cookie = new_version;
3518
3519 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3520 return (zpool_standard_error_fmt(hdl, errno,
3521 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3522 zhp->zpool_name));
3523 return (0);
3524 }
3525
3526 void
3527 zfs_save_arguments(int argc, char **argv, char *string, int len)
3528 {
3529 (void) strlcpy(string, basename(argv[0]), len);
3530 for (int i = 1; i < argc; i++) {
3531 (void) strlcat(string, " ", len);
3532 (void) strlcat(string, argv[i], len);
3533 }
3534 }
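/*
 * Illustrative sketch: given the argv from 'zpool create tank mirror c0 c1',
 * the buffer ends up holding "zpool create tank mirror c0 c1", suitable for
 * passing to zpool_log_history() below.
 *
 *	char history[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history, sizeof (history));
 */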
3535
3536 int
3537 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3538 {
3539 zfs_cmd_t zc = { 0 };
3540 nvlist_t *args;
3541 int err;
3542
3543 args = fnvlist_alloc();
3544 fnvlist_add_string(args, "message", message);
3545 err = zcmd_write_src_nvlist(hdl, &zc, args);
3546 if (err == 0)
3547 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3548 nvlist_free(args);
3549 zcmd_free_nvlists(&zc);
3550 return (err);
3551 }
3552
3553 /*
3554 * Perform ioctl to get some command history of a pool.
3555 *
3556 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3557 * logical offset of the history buffer to start reading from.
3558 *
3559 * Upon return, 'off' is the next logical offset to read from and
3560 * 'len' is the actual amount of bytes read into 'buf'.
3561 */
3562 static int
3563 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3564 {
3565 zfs_cmd_t zc = { 0 };
3566 libzfs_handle_t *hdl = zhp->zpool_hdl;
3567
3568 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3569
3570 zc.zc_history = (uint64_t)(uintptr_t)buf;
3571 zc.zc_history_len = *len;
3572 zc.zc_history_offset = *off;
3573
3574 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3575 switch (errno) {
3576 case EPERM:
3577 return (zfs_error_fmt(hdl, EZFS_PERM,
3578 dgettext(TEXT_DOMAIN,
3579 "cannot show history for pool '%s'"),
3580 zhp->zpool_name));
3581 case ENOENT:
3582 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3583 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3584 "'%s'"), zhp->zpool_name));
3585 case ENOTSUP:
3586 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3587 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3588 "'%s', pool must be upgraded"), zhp->zpool_name));
3589 default:
3590 return (zpool_standard_error_fmt(hdl, errno,
3591 dgettext(TEXT_DOMAIN,
3592 "cannot get history for '%s'"), zhp->zpool_name));
3593 }
3594 }
3595
3596 *len = zc.zc_history_len;
3597 *off = zc.zc_history_offset;
3598
3599 return (0);
3600 }
3601
3602 /*
3603 * Process the buffer of nvlists, unpacking and storing each nvlist record
3604 * into 'records'. 'leftover' is set to the number of bytes that weren't
3605 * processed as there wasn't a complete record.
3606 */
3607 int
3608 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3609 nvlist_t ***records, uint_t *numrecords)
3610 {
3611 uint64_t reclen;
3612 nvlist_t *nv;
3613 int i;
3614
3615 while (bytes_read > sizeof (reclen)) {
3616
3617 /* get length of packed record (stored as little endian) */
3618 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3619 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3620
3621 if (bytes_read < sizeof (reclen) + reclen)
3622 break;
3623
3624 /* unpack record */
3625 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3626 return (ENOMEM);
3627 bytes_read -= sizeof (reclen) + reclen;
3628 buf += sizeof (reclen) + reclen;
3629
3630 /* add record to nvlist array */
3631 (*numrecords)++;
3632 if (ISP2(*numrecords + 1)) {
3633 *records = realloc(*records,
3634 *numrecords * 2 * sizeof (nvlist_t *));
3635 }
3636 (*records)[*numrecords - 1] = nv;
3637 }
3638
3639 *leftover = bytes_read;
3640 return (0);
3641 }
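/*
 * Buffer layout consumed above: each record is a packed nvlist preceded by
 * its length stored as a little-endian uint64_t, repeated until the buffer
 * runs out (a trailing partial record is reported via 'leftover').
 *
 *	+----------+-----------------+----------+-----------------+- - -
 *	| reclen 0 | packed nvlist 0 | reclen 1 | packed nvlist 1 |
 *	+----------+-----------------+----------+-----------------+- - -
 */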
3642
3643 /*
3644 * Retrieve the command history of a pool.
3645 */
3646 int
3647 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3648 {
3649 char *buf;
3650 int buflen = 128 * 1024;
3651 uint64_t off = 0;
3652 nvlist_t **records = NULL;
3653 uint_t numrecords = 0;
3654 int err, i;
3655
3656 buf = malloc(buflen);
3657 if (buf == NULL)
3658 return (ENOMEM);
3659 do {
3660 uint64_t bytes_read = buflen;
3661 uint64_t leftover;
3662
3663 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3664 break;
3665
3666 /* if nothing else was read in, we're at EOF, just return */
3667 if (!bytes_read)
3668 break;
3669
3670 if ((err = zpool_history_unpack(buf, bytes_read,
3671 &leftover, &records, &numrecords)) != 0)
3672 break;
3673 off -= leftover;
3674 if (leftover == bytes_read) {
3675 /*
3676 * No progress was made because the buffer is too small
3677 * to hold this record; resize and retry.
3678 */
3679 buflen *= 2;
3680 free(buf);
3681 buf = malloc(buflen);
3682 if (buf == NULL)
3683 return (ENOMEM);
3684 }
3685
3686 /* CONSTCOND */
3687 } while (1);
3688
3689 free(buf);
3690
3691 if (!err) {
3692 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3693 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3694 records, numrecords) == 0);
3695 }
3696 for (i = 0; i < numrecords; i++)
3697 nvlist_free(records[i]);
3698 free(records);
3699
3700 return (err);
3701 }
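/*
 * Illustrative sketch: consuming the unpacked history.  Each record is an
 * nvlist; command-initiated entries carry keys such as ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD, which callers should treat as optional.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrec, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &nrec) == 0);
 *		for (i = 0; i < nrec; i++)
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		nvlist_free(nvhis);
 *	}
 */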
3702
3703 void
3704 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3705 char *pathname, size_t len)
3706 {
3707 zfs_cmd_t zc = { 0 };
3708 boolean_t mounted = B_FALSE;
3709 char *mntpnt = NULL;
3710 char dsname[ZFS_MAX_DATASET_NAME_LEN];
3711
3712 if (dsobj == 0) {
3713 /* special case for the MOS */
3714 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3715 return;
3716 }
3717
3718 /* get the dataset's name */
3719 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3720 zc.zc_obj = dsobj;
3721 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3722 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3723 /* just write out a path of two object numbers */
3724 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3725 dsobj, obj);
3726 return;
3727 }
3728 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3729
3730 /* find out if the dataset is mounted */
3731 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3732
3733 /* get the corrupted object's path */
3734 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3735 zc.zc_obj = obj;
3736 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3737 &zc) == 0) {
3738 if (mounted) {
3739 (void) snprintf(pathname, len, "%s%s", mntpnt,
3740 zc.zc_value);
3741 } else {
3742 (void) snprintf(pathname, len, "%s:%s",
3743 dsname, zc.zc_value);
3744 }
3745 } else {
3746 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3747 }
3748 free(mntpnt);
3749 }
3750
3751 /*
3752 * Read the EFI label from the config, if a label does not exist then
3753 * pass back the error to the caller. If the caller has passed a non-NULL
3754 * diskaddr argument then we set it to the starting address of the EFI
3755 * partition.
3756 */
3757 static int
3758 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3759 {
3760 char *path;
3761 int fd;
3762 char diskname[MAXPATHLEN];
3763 int err = -1;
3764
3765 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3766 return (err);
3767
3768 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3769 strrchr(path, '/'));
3770 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3771 struct dk_gpt *vtoc;
3772
3773 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3774 if (sb != NULL)
3775 *sb = vtoc->efi_parts[0].p_start;
3776 efi_free(vtoc);
3777 }
3778 (void) close(fd);
3779 }
3780 return (err);
3781 }
3782
3783 /*
3784  * Determine where a partition starts on a disk in the current
3785  * configuration.
3786 */
3787 static diskaddr_t
3788 find_start_block(nvlist_t *config)
3789 {
3790 nvlist_t **child;
3791 uint_t c, children;
3792 diskaddr_t sb = MAXOFFSET_T;
3793 uint64_t wholedisk;
3794
3795 if (nvlist_lookup_nvlist_array(config,
3796 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3797 if (nvlist_lookup_uint64(config,
3798 ZPOOL_CONFIG_WHOLE_DISK,
3799 &wholedisk) != 0 || !wholedisk) {
3800 return (MAXOFFSET_T);
3801 }
3802 if (read_efi_label(config, &sb) < 0)
3803 sb = MAXOFFSET_T;
3804 return (sb);
3805 }
3806
3807 for (c = 0; c < children; c++) {
3808 sb = find_start_block(child[c]);
3809 if (sb != MAXOFFSET_T) {
3810 return (sb);
3811 }
3812 }
3813 return (MAXOFFSET_T);
3814 }
3815
3816 /*
3817 * Label an individual disk. The name provided is the short name,
3818 * stripped of any leading /dev path.
3819 */
3820 int
3821 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3822 {
3823 char path[MAXPATHLEN];
3824 struct dk_gpt *vtoc;
3825 int fd;
3826 size_t resv = EFI_MIN_RESV_SIZE;
3827 uint64_t slice_size;
3828 diskaddr_t start_block;
3829 char errbuf[1024];
3830
3831 /* prepare an error message just in case */
3832 (void) snprintf(errbuf, sizeof (errbuf),
3833 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3834
3835 if (zhp) {
3836 nvlist_t *nvroot;
3837
3838 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3839 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3840
3841 if (zhp->zpool_start_block == 0)
3842 start_block = find_start_block(nvroot);
3843 else
3844 start_block = zhp->zpool_start_block;
3845 zhp->zpool_start_block = start_block;
3846 } else {
3847 /* new pool */
3848 start_block = NEW_START_BLOCK;
3849 }
3850
3851 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3852 BACKUP_SLICE);
3853
3854 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3855 /*
3856 * This shouldn't happen. We've long since verified that this
3857 * is a valid device.
3858 */
3859 zfs_error_aux(hdl,
3860 dgettext(TEXT_DOMAIN, "unable to open device"));
3861 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3862 }
3863
3864 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3865 /*
3866 * The only way this can fail is if we run out of memory, or we
3867 * were unable to read the disk's capacity
3868 */
3869 if (errno == ENOMEM)
3870 (void) no_memory(hdl);
3871
3872 (void) close(fd);
3873 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3874 "unable to read disk capacity"));
3875
3876 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3877 }
3878
	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice). V_UNASSIGNED is supposed to be used only for zero-size
	 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
	 * etc. are all too specific. V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

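	/*
	 * Added sketch of the label written below (actual LBAs depend on
	 * the disk and on start_block):
	 *
	 *	slice 0 (V_USR, "zfs"):	start_block .. start_block+slice_size-1
	 *	slice 8 (V_RESERVED):	the final EFI_MIN_RESV_SIZE sectors
	 */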
	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels. Print out a helpful error message directing
		 * the user to manually label the disk and give a specific
		 * slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}

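/*
 * Example usage (added illustration; hypothetical caller, error handling
 * elided). "tank" and "c1t0d0" are placeholder names:
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *
 *	if (zpool_label_disk(hdl, zhp, "c1t0d0") == 0)
 *		(void) printf("wrote EFI label to c1t0d0\n");
 *
 *	zpool_close(zhp);
 *	libzfs_fini(hdl);
 */
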
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

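/*
 * Added illustration: a top-level "raidz" vdev whose children are all
 * "disk" vdevs passes the check above; any subtree containing a "file",
 * "hole", or "missing" vdev makes B_FALSE propagate back up to the
 * caller, which then rejects the pool as a dump target.
 */
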
/*
 * Check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
 *
 * Allowable storage configurations include mirrors, all raidz variants, and
 * pools with log, cache, and spare devices. Pools which are backed by files
 * or have missing/hole vdevs are not suitable.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZFS_MAX_DATASET_NAME_LEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	/* anything outside the zvol device directory isn't a zvol */
	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen) != 0) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}

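/*
 * Example usage (added illustration; hypothetical caller). The device
 * path is a placeholder:
 *
 *	int err = zvol_check_dump_config("/dev/zvol/dsk/tank/dump");
 *
 *	if (err < 0)
 *		(void) printf("not a zvol\n");
 *	else if (err > 0)
 *		(void) printf("pool configuration unsuitable for dump\n");
 *	else
 *		(void) printf("usable as a dump device\n");
 */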