/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, 2023, Klara Inc.
 */

#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	unsigned int create:1;	/* Validate property on creation */
	unsigned int import:1;	/* Validate property on import */
	unsigned int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zhp->zpool_n_propnames > 0) {
		nvlist_t *innvl = fnvlist_alloc();
		fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
		    zhp->zpool_propnames, zhp->zpool_n_propnames);
		zcmd_write_src_nvlist(hdl, &zc, innvl);
	}

	zcmd_alloc_dst_nvlist(hdl, &zc, 0);

	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM)
			zcmd_expand_dst_nvlist(hdl, &zc);
		else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	const char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_string(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
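
/*
 * Illustrative usage sketch (not part of the library): reading a numeric
 * pool property. The open handle "zhp" and the choice of property are
 * assumptions made for this example.
 *
 *	zprop_source_t src;
 *	uint64_t cap = zpool_get_prop_int(zhp, ZPOOL_PROP_CAPACITY, &src);
 *	(void) printf("capacity: %llu%% (%s)\n", (u_longlong_t)cap,
 *	    src == ZPROP_SRC_DEFAULT ? "default" : "stored");
 */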

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		nvlist_t *nvroot = fnvlist_lookup_nvlist(
		    zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
		uint_t vsc;
		vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
		    nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			zfs_fallthrough;
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	/*
	 * ZPOOL_PROP_DEDUPCACHED can be fetched by name only using
	 * the ZPOOL_GET_PROPS_NAMES mechanism
	 */
	if (prop == ZPOOL_PROP_DEDUPCACHED) {
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
		(void) zpool_get_all_props(zhp);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
			/*
			 * If dedup quota is 0, we translate this into 'none'
			 * (unless literal is set). And if it is UINT64_MAX
			 * we translate that as 'auto' (limit to the size of
			 * the dedicated dedup VDEV). Otherwise, fall through
			 * into the regular number formatting.
			 */
			if (intval == 0) {
				(void) strlcpy(buf, literal ? "0" : "none",
				    len);
				break;
			} else if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "auto", len);
				break;
			}
			zfs_fallthrough;

		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
		case ZPOOL_PROP_MAXBLOCKSIZE:
		case ZPOOL_PROP_MAXDNODESIZE:
		case ZPOOL_PROP_BCLONESAVED:
		case ZPOOL_PROP_BCLONEUSED:
		case ZPOOL_PROP_DEDUP_TABLE_SIZE:
		case ZPOOL_PROP_DEDUPCACHED:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_BCLONERATIO:
		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			zfs_fallthrough;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
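
/*
 * Illustrative usage sketch: formatting a property with zpool_get_prop().
 * Passing literal == B_FALSE yields the human-readable form (e.g. "1.2T"),
 * while B_TRUE yields the exact number. The handle "zhp" is assumed to
 * have been opened by the caller.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_FREE, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("free space: %s\n", buf);
 */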

/*
 * Get a zpool property value for 'propname' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len, zprop_source_t *srctype)
{
	nvlist_t *nv;
	uint64_t ival;
	const char *value;
	zprop_source_t source = ZPROP_SRC_LOCAL;

	if (zhp->zpool_props == NULL)
		zpool_get_all_props(zhp);

	if (nvlist_lookup_nvlist(zhp->zpool_props, propname, &nv) == 0) {
		if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
			source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = "-";
	}

	if (srctype)
		*srctype = source;

	(void) strlcpy(buf, value, len);

	return (0);
}
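
/*
 * Illustrative usage sketch: fetching a user-defined pool property such as
 * "org.example:owner" (a hypothetical name chosen for this example).
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *	if (zpool_get_userprop(zhp, "org.example:owner", buf,
 *	    sizeof (buf), &src) == 0 && strcmp(buf, "-") != 0)
 *		(void) printf("owner: %s\n", buf);
 */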

/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on. Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
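
/*
 * For example, with pool "tank": bootfs values "", "tank" and "tank/root"
 * are accepted by bootfs_name_valid(), while "tankX" and "other/root" are
 * rejected because they do not name a dataset inside "tank".
 */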

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	const char *strval;
	uint64_t intval;
	const char *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	char *parent, *slash;
	char report[1024];

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		if (flags.vdevprop && zpool_prop_vdev(propname)) {
			vdev_prop_t vprop = vdev_name_to_prop(propname);

			if (vdev_prop_readonly(vprop)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is readonly"), propname);
				(void) zfs_error(hdl, EZFS_PROPREADONLY,
				    errbuf);
				goto error;
			}

			if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
			    retprops, &strval, &intval, errbuf) != 0)
				goto error;

			continue;
		} else if (flags.vdevprop && vdev_prop_user(propname)) {
			if (nvlist_add_nvpair(retprops, elem) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		} else if (flags.vdevprop) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property: '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature '%s' unsupported by kernel"),
				    fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		} else if (prop == ZPOOL_PROP_INVAL &&
		    zfs_prop_user(propname)) {
			/*
			 * This is a user property: make sure it's a
			 * string, and that it's less than ZAP_MAXNAMELEN.
			 */
			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property name '%s' is too long"),
				    propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);

			if (strlen(strval) >= ZFS_MAXPROPLEN) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property value '%s' is too long"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_string(retprops, propname,
			    strval) != 0) {
				(void) no_memory(hdl);
				goto error;
			}

			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (unsigned long long)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid, "
				    "only values between %" PRId32 " and %"
				    PRId32 " are allowed."),
				    propname, (unsigned long long)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the same pool it is
			 * being set on.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			parent = strdup(strval);
			if (parent == NULL) {
				(void) zfs_error(hdl, EZFS_NOMEM, errbuf);
				goto error;
			}
			slash = strrchr(parent, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), parent);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				free(parent);
				goto error;
			}

			*slash = '\0';

			if (parent[0] != '\0' &&
			    (stat64(parent, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    parent);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				free(parent);
				goto error;
			}
			free(parent);

			break;

		case ZPOOL_PROP_COMPATIBILITY:
			switch (zpool_load_compat(strval, NULL, report, 1024)) {
			case ZPOOL_COMPATIBILITY_OK:
			case ZPOOL_COMPATIBILITY_WARNTOKEN:
				break;
			case ZPOOL_COMPATIBILITY_BADFILE:
			case ZPOOL_COMPATIBILITY_BADTOKEN:
			case ZPOOL_COMPATIBILITY_NOFILES:
				zfs_error_aux(hdl, "%s", report);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_DEDUPDITTO:
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[ERRBUFLEN];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
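
/*
 * Illustrative usage sketch: setting a property through zpool_set_prop().
 * The value is passed as a string and validated/parsed by
 * zpool_valid_proplist() above. An initialized libzfs handle "g_zfs" is
 * assumed here.
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */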

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    zfs_type_t type, boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, type) != 0)
		return (-1);

	if (type == ZFS_TYPE_VDEV)
		return (0);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		/* Handle userprops in the all properties case */
		if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
			return (-1);

		nvp = NULL;
		while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
		    NULL) {
			const char *propname = nvpair_name(nvp);

			if (!zfs_prop_user(propname))
				continue;

			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_strdup(hdl, propname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_USERPROP;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_USERPROP &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		} else if (entry->pl_prop == ZPROP_INVAL &&
		    zfs_prop_user(entry->pl_user_prop) &&
		    zpool_get_userprop(zhp, entry->pl_user_prop, buf,
		    sizeof (buf), NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
    zprop_list_t **plp)
{
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	const char *strval = NULL;
	int err = 0;
	nvpair_t *elem = NULL;
	nvlist_t *vprops = NULL;
	nvlist_t *propval = NULL;
	const char *propname;
	vdev_prop_t prop;
	zprop_list_t **last;

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
		    entry->pl_user_prop, buf, sizeof (buf), NULL,
		    B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
		if (entry->pl_prop == VDEV_PROP_NAME &&
		    strlen(vdevname) > entry->pl_width)
			entry->pl_width = strlen(vdevname);
	}

	/* Handle the all properties case */
	last = plp;
	if (*last != NULL && (*last)->pl_all == B_TRUE) {
		while (*last != NULL)
			last = &(*last)->pl_next;

		err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
		if (err != 0)
			return (err);

		while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
			propname = nvpair_name(elem);

			/* Skip properties that are not user defined */
			if ((prop = vdev_name_to_prop(propname)) !=
			    VDEV_PROP_USERPROP)
				continue;

			if (nvpair_value_nvlist(elem, &propval) != 0)
				continue;

			strval = fnvlist_lookup_string(propval, ZPROP_VALUE);

			entry = zfs_alloc(zhp->zpool_hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = prop;
			entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
			    propname);
			entry->pl_width = strlen(strval);
			entry->pl_all = B_TRUE;
			*last = entry;
			last = &entry->pl_next;
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
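
/*
 * Illustrative usage sketch: querying a feature state. For a supported
 * feature the buffer receives "disabled", "enabled" or "active".
 *
 *	char buf[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
 *	    sizeof (buf)) == 0)
 *		(void) printf("async_destroy: %s\n", buf);
 */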

/*
 * Validate the given pool name, optionally recording an extended error
 * message against 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
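
/*
 * For example, zpool_name_valid() accepts "tank" but rejects "mirror2"
 * during create/import (reserved prefix), "1pool" (names must begin with
 * a letter), and disk-like names such as "c0t0d0".
 */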

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
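
/*
 * Illustrative lifecycle sketch: the usual open/use/close pattern for the
 * handles above, assuming an initialized libzfs handle "g_zfs" and a pool
 * named "tank".
 *
 *	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");
 *	if (zhp != NULL) {
 *		(void) printf("%s is %s\n", zpool_get_name(zhp),
 *		    zpool_get_state_str(zhp));
 *		zpool_close(zhp);
 *	}
 */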

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			const char *bias;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Check if vdev list contains a dRAID vdev
 */
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			const char *type;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type) == 0 &&
			    strcmp(type, VDEV_TYPE_DRAID) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Output a dRAID top-level vdev name into the provided buffer.
 */
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
    uint64_t spares, uint64_t children)
{
	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
	    (u_longlong_t)children, (u_longlong_t)spares);

	return (name);
}
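
/*
 * For example, for a dRAID1 vdev with 8 data disks, 1 distributed spare
 * and 11 children, zpool_draid_name() produces "draid1:8d:11c:1s".
 */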

/*
 * Return B_TRUE if the provided name is a dRAID spare name.
 */
boolean_t
zpool_is_draid_spare(const char *name)
{
	uint64_t spare_id, parity, vdev_id;

	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
	    (u_longlong_t *)&spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}
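
/*
 * For example, "draid1-0-2" (parity 1, top-level vdev 0, spare 2) is a
 * dRAID spare name, while "draid1-0" or a plain disk name is not.
 */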

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char errbuf[ERRBUFLEN];
	int ret = -1;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, errbuf)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		const char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
			goto create_failed;
		}

		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props)
		zcmd_write_src_nvlist(hdl, &zc, zc_props);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is part
			 * of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case EINVAL:
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
				return (zfs_error(hdl, EZFS_BADDEV, errbuf));
			} else {
				return (zpool_standard_error(hdl, errno,
				    errbuf));
			}

		default:
			return (zpool_standard_error(hdl, errno, errbuf));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}
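
/*
 * Illustrative sketch (error handling and device labeling elided): building
 * a minimal root vdev tree for zpool_create() from a single disk. The
 * libzfs handle "g_zfs" and the device path are hypothetical placeholders;
 * real callers normally go through make_root_vdev() in the zpool command.
 *
 *	nvlist_t *disk = fnvlist_alloc();
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdX");
 *	nvlist_t *root = fnvlist_alloc();
 *	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    (const nvlist_t **)&disk, 1);
 *	if (zpool_create(g_zfs, "tank", root, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */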

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Create a checkpoint in the given pool.
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, errbuf);
		return (-1);
	}

	return (0);
}

/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, errbuf);
		return (-1);
	}

	return (0);
}

/*
 * Prefetch the named type of data (e.g. the DDT) for the given pool.
 */
int
zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_prefetch(zhp->zpool_name, type);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot prefetch %s in '%s'"),
		    type == ZPOOL_PREFETCH_DDT ? "ddt" : "", zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_flags = check_ashift;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EINVAL:
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid config; a pool with removing/"
				    "removed vdevs does not support adding "
				    "raidz or dRAID vdevs"));
			}

			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
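
/*
 * Illustrative usage sketch: exporting a pool, retrying with force (which
 * forcibly unmounts its datasets) if the plain export fails. The log string
 * is recorded in the pool history.
 *
 *	if (zpool_export(zhp, B_FALSE, "example export") != 0 &&
 *	    zpool_export(zhp, B_TRUE, "example export (forced)") != 0)
 *		(void) fprintf(stderr, "export failed\n");
 */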
1946
1947 static void
zpool_rewind_exclaim(libzfs_handle_t * hdl,const char * name,boolean_t dryrun,nvlist_t * config)1948 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1949 nvlist_t *config)
1950 {
1951 nvlist_t *nv = NULL;
1952 uint64_t rewindto;
1953 int64_t loss = -1;
1954 struct tm t;
1955 char timestr[128];
1956
1957 if (!hdl->libzfs_printerr || config == NULL)
1958 return;
1959
1960 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1961 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1962 return;
1963 }
1964
1965 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1966 return;
1967 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1968
1969 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1970 ctime_r((time_t *)&rewindto, timestr) != NULL) {
1971 timestr[24] = 0;
1972 if (dryrun) {
1973 (void) printf(dgettext(TEXT_DOMAIN,
1974 "Would be able to return %s "
1975 "to its state as of %s.\n"),
1976 name, timestr);
1977 } else {
1978 (void) printf(dgettext(TEXT_DOMAIN,
1979 "Pool %s returned to its state as of %s.\n"),
1980 name, timestr);
1981 }
1982 if (loss > 120) {
1983 (void) printf(dgettext(TEXT_DOMAIN,
1984 "%s approximately %lld "),
1985 dryrun ? "Would discard" : "Discarded",
1986 ((longlong_t)loss + 30) / 60);
1987 (void) printf(dgettext(TEXT_DOMAIN,
1988 "minutes of transactions.\n"));
1989 } else if (loss > 0) {
1990 (void) printf(dgettext(TEXT_DOMAIN,
1991 "%s approximately %lld "),
1992 dryrun ? "Would discard" : "Discarded",
1993 (longlong_t)loss);
1994 (void) printf(dgettext(TEXT_DOMAIN,
1995 "seconds of transactions.\n"));
1996 }
1997 }
1998 }
1999
2000 void
2001 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
2002 nvlist_t *config, char *buf, size_t size)
2003 {
2004 nvlist_t *nv = NULL;
2005 int64_t loss = -1;
2006 uint64_t edata = UINT64_MAX;
2007 uint64_t rewindto;
2008 struct tm t;
2009 char timestr[128], temp[1024];
2010
2011 if (!hdl->libzfs_printerr)
2012 return;
2013
2014 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME is missing */
2015 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
2016 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
2017 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
2018 goto no_info;
2019
2020 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
2021 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
2022 &edata);
2023
2024 (void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
2025 "Recovery is possible, but will result in some data loss.\n"));
2026
2027 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
2028 ctime_r((time_t *)&rewindto, timestr) != NULL) {
2029 timestr[24] = 0;
2030 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2031 "\tReturning the pool to its state as of %s\n"
2032 "\tshould correct the problem. "), timestr);
2033 (void) strlcat(buf, temp, size);
2034 } else {
2035 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2036 "\tReverting the pool to an earlier state "
2037 "should correct the problem.\n\t"), size);
2038 }
2039
2040 if (loss > 120) {
2041 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2042 "Approximately %lld minutes of data\n"
2043 "\tmust be discarded, irreversibly. "),
2044 ((longlong_t)loss + 30) / 60);
2045 (void) strlcat(buf, temp, size);
2046 } else if (loss > 0) {
2047 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2048 "Approximately %lld seconds of data\n"
2049 "\tmust be discarded, irreversibly. "),
2050 (longlong_t)loss);
2051 (void) strlcat(buf, temp, size);
2052 }
2053 if (edata != 0 && edata != UINT64_MAX) {
2054 if (edata == 1) {
2055 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2056 "After rewind, at least\n"
2057 "\tone persistent user-data error will remain. "),
2058 size);
2059 } else {
2060 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2061 "After rewind, several\n"
2062 "\tpersistent user-data errors will remain. "),
2063 size);
2064 }
2065 }
2066 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2067 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
2068 reason >= 0 ? "clear" : "import", name);
2069 (void) strlcat(buf, temp, size);
2070
2071 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2072 "A scrub of the pool\n"
2073 "\tis strongly recommended after recovery.\n"), size);
2074 return;
2075
2076 no_info:
2077 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2078 "Destroy and re-create the pool from\n\ta backup source.\n"), size);
2079 }
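
/*
 * Illustrative sketch, not part of the original source: callers are
 * expected to zero the buffer first, because the no_info path above
 * appends with strlcat() into whatever the buffer already holds
 * (compare the memset() before the zpool_explain_recover() call later
 * in this file). The handle, pool name, and config are assumed inputs.
 */
#if 0
static void
explain_example(libzfs_handle_t *hdl, nvlist_t *config, int error)
{
	char buf[2048];

	memset(buf, 0, sizeof (buf));
	zpool_explain_recover(hdl, "tank", -error, config, buf, sizeof (buf));
	(void) printf("\t%s", buf);
}
#endif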
2080
2081 /*
2082 * zpool_import() is a contracted interface; it should be kept the same
2083 * if possible.
2084 *
2085 * Applications should use zpool_import_props() to import a pool with
2086 * new property values to be set.
2087 */
2088 int
2089 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2090 char *altroot)
2091 {
2092 nvlist_t *props = NULL;
2093 int ret;
2094
2095 if (altroot != NULL) {
2096 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
2097 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2098 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2099 newname));
2100 }
2101
2102 if (nvlist_add_string(props,
2103 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
2104 nvlist_add_string(props,
2105 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
2106 nvlist_free(props);
2107 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2108 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2109 newname));
2110 }
2111 }
2112
2113 ret = zpool_import_props(hdl, config, newname, props,
2114 ZFS_IMPORT_NORMAL);
2115 nvlist_free(props);
2116 return (ret);
2117 }
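
/*
 * Illustrative sketch, not part of the original source: importing a
 * discovered pool under an alternate root. The config nvlist is
 * assumed to have come from the discovery path (zpool_find_import(),
 * per the comment above); the altroot is a placeholder.
 */
#if 0
static int
import_example(libzfs_handle_t *hdl, nvlist_t *config)
{
	char altroot[] = "/mnt/recovery";

	/* A NULL newname keeps the pool's original name. */
	return (zpool_import(hdl, config, NULL, altroot));
}
#endif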
2118
2119 static void
2120 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
2121 int indent)
2122 {
2123 nvlist_t **child;
2124 uint_t c, children;
2125 char *vname;
2126 uint64_t is_log = 0;
2127
2128 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
2129 &is_log);
2130
2131 if (name != NULL)
2132 (void) printf("\t%*s%s%s\n", indent, "", name,
2133 is_log ? " [log]" : "");
2134
2135 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2136 &child, &children) != 0)
2137 return;
2138
2139 for (c = 0; c < children; c++) {
2140 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
2141 print_vdev_tree(hdl, vname, child[c], indent + 2);
2142 free(vname);
2143 }
2144 }
2145
2146 void
2147 zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
2148 {
2149 nvlist_t *nvinfo, *unsup_feat;
2150 char temp[512];
2151
2152 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
2153 unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
2154
2155 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
2156 nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
2157 const char *desc = fnvpair_value_string(nvp);
2158 if (strlen(desc) > 0) {
2159 (void) snprintf(temp, 512, "\t%s (%s)\n",
2160 nvpair_name(nvp), desc);
2161 (void) strlcat(buf, temp, size);
2162 } else {
2163 (void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
2164 (void) strlcat(buf, temp, size);
2165 }
2166 }
2167 }
2168
2169 /*
2170 * Import the given pool using the known configuration and a list of
2171 * properties to be set. The configuration should have come from
2172 * zpool_find_import(). The 'newname' parameter controls whether the pool
2173 * is imported with a different name.
2174 */
2175 int
2176 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2177 nvlist_t *props, int flags)
2178 {
2179 zfs_cmd_t zc = {"\0"};
2180 zpool_load_policy_t policy;
2181 nvlist_t *nv = NULL;
2182 nvlist_t *nvinfo = NULL;
2183 nvlist_t *missing = NULL;
2184 const char *thename;
2185 const char *origname;
2186 int ret;
2187 int error = 0;
2188 char buf[2048];
2189 char errbuf[ERRBUFLEN];
2190
2191 origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
2192
2193 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2194 "cannot import pool '%s'"), origname);
2195
2196 if (newname != NULL) {
2197 if (!zpool_name_valid(hdl, B_FALSE, newname))
2198 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
2199 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2200 newname));
2201 thename = newname;
2202 } else {
2203 thename = origname;
2204 }
2205
2206 if (props != NULL) {
2207 uint64_t version;
2208 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2209
2210 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
2211
2212 if ((props = zpool_valid_proplist(hdl, origname,
2213 props, version, flags, errbuf)) == NULL)
2214 return (-1);
2215 zcmd_write_src_nvlist(hdl, &zc, props);
2216 nvlist_free(props);
2217 }
2218
2219 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
2220
2221 zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
2222
2223 zcmd_write_conf_nvlist(hdl, &zc, config);
2224 zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
2225
2226 zc.zc_cookie = flags;
2227 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
2228 errno == ENOMEM)
2229 zcmd_expand_dst_nvlist(hdl, &zc);
2230 if (ret != 0)
2231 error = errno;
2232
2233 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
2234
2235 zcmd_free_nvlists(&zc);
2236
2237 zpool_get_load_policy(config, &policy);
2238
2239 if (error) {
2240 char desc[1024];
2241 char aux[256];
2242
2243 /*
2244 * Dry-run failed, but we print out what success
2245 * looks like if we found a best txg
2246 */
2247 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
2248 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2249 B_TRUE, nv);
2250 nvlist_free(nv);
2251 return (-1);
2252 }
2253
2254 if (newname == NULL)
2255 (void) snprintf(desc, sizeof (desc),
2256 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2257 thename);
2258 else
2259 (void) snprintf(desc, sizeof (desc),
2260 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
2261 origname, thename);
2262
2263 switch (error) {
2264 case ENOTSUP:
2265 if (nv != NULL && nvlist_lookup_nvlist(nv,
2266 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2267 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
2268 (void) printf(dgettext(TEXT_DOMAIN, "This "
2269 "pool uses the following feature(s) not "
2270 "supported by this system:\n"));
2271 memset(buf, 0, 2048);
2272 zpool_collect_unsup_feat(nv, buf, 2048);
2273 (void) printf("%s", buf);
2274 if (nvlist_exists(nvinfo,
2275 ZPOOL_CONFIG_CAN_RDONLY)) {
2276 (void) printf(dgettext(TEXT_DOMAIN,
2277 "All unsupported features are only "
2278 "required for writing to the pool."
2279 "\nThe pool can be imported using "
2280 "'-o readonly=on'.\n"));
2281 }
2282 }
2283 /*
2284 * Unsupported version.
2285 */
2286 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
2287 break;
2288
2289 case EREMOTEIO:
2290 if (nv != NULL && nvlist_lookup_nvlist(nv,
2291 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
2292 const char *hostname = "<unknown>";
2293 uint64_t hostid = 0;
2294 mmp_state_t mmp_state;
2295
2296 mmp_state = fnvlist_lookup_uint64(nvinfo,
2297 ZPOOL_CONFIG_MMP_STATE);
2298
2299 if (nvlist_exists(nvinfo,
2300 ZPOOL_CONFIG_MMP_HOSTNAME))
2301 hostname = fnvlist_lookup_string(nvinfo,
2302 ZPOOL_CONFIG_MMP_HOSTNAME);
2303
2304 if (nvlist_exists(nvinfo,
2305 ZPOOL_CONFIG_MMP_HOSTID))
2306 hostid = fnvlist_lookup_uint64(nvinfo,
2307 ZPOOL_CONFIG_MMP_HOSTID);
2308
2309 if (mmp_state == MMP_STATE_ACTIVE) {
2310 (void) snprintf(aux, sizeof (aux),
2311 dgettext(TEXT_DOMAIN, "pool is imp"
2312 "orted on host '%s' (hostid=%lx).\n"
2313 "Export the pool on the other "
2314 "system, then run 'zpool import'."),
2315 hostname, (unsigned long) hostid);
2316 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
2317 (void) snprintf(aux, sizeof (aux),
2318 dgettext(TEXT_DOMAIN, "pool has "
2319 "the multihost property on and "
2320 "the\nsystem's hostid is not set. "
2321 "Set a unique system hostid with "
2322 "the zgenhostid(8) command.\n"));
2323 }
2324
2325 (void) zfs_error_aux(hdl, "%s", aux);
2326 }
2327 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
2328 break;
2329
2330 case EINVAL:
2331 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2332 break;
2333
2334 case EROFS:
2335 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2336 "one or more devices is read only"));
2337 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2338 break;
2339
2340 case ENXIO:
2341 if (nv && nvlist_lookup_nvlist(nv,
2342 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2343 nvlist_lookup_nvlist(nvinfo,
2344 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2345 (void) printf(dgettext(TEXT_DOMAIN,
2346 "The devices below are missing or "
2347 "corrupted; use '-m' to import the pool "
2348 "anyway:\n"));
2349 print_vdev_tree(hdl, NULL, missing, 2);
2350 (void) printf("\n");
2351 }
2352 (void) zpool_standard_error(hdl, error, desc);
2353 break;
2354
2355 case EEXIST:
2356 (void) zpool_standard_error(hdl, error, desc);
2357 break;
2358
2359 case EBUSY:
2360 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2361 "one or more devices are already in use\n"));
2362 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2363 break;
2364 case ENAMETOOLONG:
2365 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2366 "new name of at least one dataset is longer than "
2367 "the maximum allowable length"));
2368 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2369 break;
2370 default:
2371 (void) zpool_standard_error(hdl, error, desc);
2372 memset(buf, 0, 2048);
2373 zpool_explain_recover(hdl,
2374 newname ? origname : thename, -error, nv,
2375 buf, 2048);
2376 (void) printf("\t%s", buf);
2377 break;
2378 }
2379
2380 nvlist_free(nv);
2381 ret = -1;
2382 } else {
2383 zpool_handle_t *zhp;
2384
2385 /*
2386 * This should never fail, but play it safe anyway.
2387 */
2388 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2389 ret = -1;
2390 else if (zhp != NULL)
2391 zpool_close(zhp);
2392 if (policy.zlp_rewind &
2393 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2394 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2395 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2396 }
2397 nvlist_free(nv);
2398 }
2399
2400 return (ret);
2401 }
2402
2403 /*
2404 * Translate vdev names to guids. If a vdev_path is determined to be
2405 * unsuitable then a vd_errlist is allocated and the vdev path and errno
2406 * are added to it.
2407 */
2408 static int
2409 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2410 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2411 {
2412 nvlist_t *errlist = NULL;
2413 int error = 0;
2414
2415 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2416 elem = nvlist_next_nvpair(vds, elem)) {
2417 boolean_t spare, cache;
2418
2419 const char *vd_path = nvpair_name(elem);
2420 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2421 NULL);
2422
2423 if ((tgt == NULL) || cache || spare) {
2424 if (errlist == NULL) {
2425 errlist = fnvlist_alloc();
2426 error = EINVAL;
2427 }
2428
2429 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2430 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2431 fnvlist_add_int64(errlist, vd_path, err);
2432 continue;
2433 }
2434
2435 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2436 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2437
2438 char msg[MAXNAMELEN];
2439 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2440 fnvlist_add_string(guids_to_paths, msg, vd_path);
2441 }
2442
2443 if (error != 0) {
2444 verify(errlist != NULL);
2445 if (vd_errlist != NULL)
2446 *vd_errlist = errlist;
2447 else
2448 fnvlist_free(errlist);
2449 }
2450
2451 return (error);
2452 }
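
/*
 * Illustrative sketch, not part of the original source: the 'vds'
 * nvlist consumed above is keyed by vdev name (path, short name, or
 * guid string); only the pair names matter to the translation. This
 * mirrors how the zpool(8) command builds the list, assuming boolean
 * flag entries.
 */
#if 0
static nvlist_t *
vds_example(int argc, char **argv)
{
	nvlist_t *vds = fnvlist_alloc();

	for (int i = 0; i < argc; i++)
		fnvlist_add_boolean(vds, argv[i]);	/* e.g. "sda" */

	return (vds);
}
#endif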
2453
2454 static int
2455 xlate_init_err(int err)
2456 {
2457 switch (err) {
2458 case ENODEV:
2459 return (EZFS_NODEVICE);
2460 case EINVAL:
2461 case EROFS:
2462 return (EZFS_BADDEV);
2463 case EBUSY:
2464 return (EZFS_INITIALIZING);
2465 case ESRCH:
2466 return (EZFS_NO_INITIALIZE);
2467 }
2468 return (err);
2469 }
2470
2471 /*
2472 * Begin, suspend, cancel, or uninit (clear) the initialization (initializing
2473 * of all free blocks) for the given vdevs in the given pool.
2474 */
2475 static int
2476 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2477 nvlist_t *vds, boolean_t wait)
2478 {
2479 int err;
2480
2481 nvlist_t *vdev_guids = fnvlist_alloc();
2482 nvlist_t *guids_to_paths = fnvlist_alloc();
2483 nvlist_t *vd_errlist = NULL;
2484 nvlist_t *errlist;
2485 nvpair_t *elem;
2486
2487 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2488 guids_to_paths, &vd_errlist);
2489
2490 if (err != 0) {
2491 verify(vd_errlist != NULL);
2492 goto list_errors;
2493 }
2494
2495 err = lzc_initialize(zhp->zpool_name, cmd_type,
2496 vdev_guids, &errlist);
2497
2498 if (err != 0) {
2499 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2500 ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
2501 goto list_errors;
2502 }
2503
2504 if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
2505 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2506 "uninitialize is not supported by kernel"));
2507 }
2508
2509 (void) zpool_standard_error(zhp->zpool_hdl, err,
2510 dgettext(TEXT_DOMAIN, "operation failed"));
2511 goto out;
2512 }
2513
2514 if (wait) {
2515 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2516 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2517
2518 uint64_t guid = fnvpair_value_uint64(elem);
2519
2520 err = lzc_wait_tag(zhp->zpool_name,
2521 ZPOOL_WAIT_INITIALIZE, guid, NULL);
2522 if (err != 0) {
2523 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2524 err, dgettext(TEXT_DOMAIN, "error "
2525 "waiting for '%s' to initialize"),
2526 nvpair_name(elem));
2527
2528 goto out;
2529 }
2530 }
2531 }
2532 goto out;
2533
2534 list_errors:
2535 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2536 elem = nvlist_next_nvpair(vd_errlist, elem)) {
2537 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2538 const char *path;
2539
2540 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2541 &path) != 0)
2542 path = nvpair_name(elem);
2543
2544 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2545 "cannot initialize '%s'", path);
2546 }
2547
2548 out:
2549 fnvlist_free(vdev_guids);
2550 fnvlist_free(guids_to_paths);
2551
2552 if (vd_errlist != NULL)
2553 fnvlist_free(vd_errlist);
2554
2555 return (err == 0 ? 0 : -1);
2556 }
2557
2558 int
2559 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2560 nvlist_t *vds)
2561 {
2562 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2563 }
2564
2565 int
2566 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2567 nvlist_t *vds)
2568 {
2569 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2570 }
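
/*
 * Illustrative sketch, not part of the original source: starting
 * initialization on one vdev and blocking until it completes. The
 * device name is a placeholder.
 */
#if 0
static int
initialize_example(zpool_handle_t *zhp)
{
	nvlist_t *vds = fnvlist_alloc();
	int err;

	fnvlist_add_boolean(vds, "sda");

	/* The _wait variant returns only once initialization finishes. */
	err = zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);
	fnvlist_free(vds);
	return (err);
}
#endif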
2571
2572 static int
2573 xlate_trim_err(int err)
2574 {
2575 switch (err) {
2576 case ENODEV:
2577 return (EZFS_NODEVICE);
2578 case EINVAL:
2579 case EROFS:
2580 return (EZFS_BADDEV);
2581 case EBUSY:
2582 return (EZFS_TRIMMING);
2583 case ESRCH:
2584 return (EZFS_NO_TRIM);
2585 case EOPNOTSUPP:
2586 return (EZFS_TRIM_NOTSUP);
2587 }
2588 return (err);
2589 }
2590
2591 static int
2592 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2593 {
2594 int err;
2595 nvpair_t *elem;
2596
2597 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2598 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2599
2600 uint64_t guid = fnvpair_value_uint64(elem);
2601
2602 err = lzc_wait_tag(zhp->zpool_name,
2603 ZPOOL_WAIT_TRIM, guid, NULL);
2604 if (err != 0) {
2605 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2606 err, dgettext(TEXT_DOMAIN, "error "
2607 "waiting to trim '%s'"), nvpair_name(elem));
2608
2609 return (err);
2610 }
2611 }
2612 return (0);
2613 }
2614
2615 /*
2616 * Check errlist and report any errors, omitting ones which should be
2617 * suppressed. Returns B_TRUE if any errors were reported.
2618 */
2619 static boolean_t
2620 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
2621 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
2622 {
2623 nvpair_t *elem;
2624 boolean_t reported_errs = B_FALSE;
2625 int num_vds = 0;
2626 int num_suppressed_errs = 0;
2627
2628 for (elem = nvlist_next_nvpair(vds, NULL);
2629 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
2630 num_vds++;
2631 }
2632
2633 for (elem = nvlist_next_nvpair(errlist, NULL);
2634 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
2635 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2636 const char *path;
2637
2638 /*
2639 * If only the pool was specified and it was not a secure
2640 * trim, then suppress warnings for individual vdevs which
2641 * do not support trimming.
2642 */
2643 if (vd_error == EZFS_TRIM_NOTSUP &&
2644 trim_flags->fullpool &&
2645 !trim_flags->secure) {
2646 num_suppressed_errs++;
2647 continue;
2648 }
2649
2650 reported_errs = B_TRUE;
2651 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2652 &path) != 0)
2653 path = nvpair_name(elem);
2654
2655 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2656 "cannot trim '%s'", path);
2657 }
2658
2659 if (num_suppressed_errs == num_vds) {
2660 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2661 "no devices in pool support trim operations"));
2662 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
2663 dgettext(TEXT_DOMAIN, "cannot trim")));
2664 reported_errs = B_TRUE;
2665 }
2666
2667 return (reported_errs);
2668 }
2669
2670 /*
2671 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2672 * the given vdevs in the given pool.
2673 */
2674 int
2675 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
2676 trimflags_t *trim_flags)
2677 {
2678 int err;
2679 int retval = 0;
2680
2681 nvlist_t *vdev_guids = fnvlist_alloc();
2682 nvlist_t *guids_to_paths = fnvlist_alloc();
2683 nvlist_t *errlist = NULL;
2684
2685 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2686 guids_to_paths, &errlist);
2687 if (err != 0) {
2688 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
2689 retval = -1;
2690 goto out;
2691 }
2692
2693 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
2694 trim_flags->secure, vdev_guids, &errlist);
2695 if (err != 0) {
2696 nvlist_t *vd_errlist;
2697 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2698 ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
2699 if (check_trim_errs(zhp, trim_flags, guids_to_paths,
2700 vds, vd_errlist)) {
2701 retval = -1;
2702 goto out;
2703 }
2704 } else {
2705 char errbuf[ERRBUFLEN];
2706
2707 (void) snprintf(errbuf, sizeof (errbuf),
2708 dgettext(TEXT_DOMAIN, "operation failed"));
2709 zpool_standard_error(zhp->zpool_hdl, err, errbuf);
2710 retval = -1;
2711 goto out;
2712 }
2713 }
2714
2715
2716 if (trim_flags->wait)
2717 retval = zpool_trim_wait(zhp, vdev_guids);
2718
2719 out:
2720 if (errlist != NULL)
2721 fnvlist_free(errlist);
2722 fnvlist_free(vdev_guids);
2723 fnvlist_free(guids_to_paths);
2724 return (retval);
2725 }
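
/*
 * Illustrative sketch, not part of the original source: trimming a
 * single device. The trimflags_t fields shown are the ones this file
 * reads (fullpool, secure, wait, rate); the device name is a
 * placeholder.
 */
#if 0
static int
trim_example(zpool_handle_t *zhp)
{
	trimflags_t flags = {
		.fullpool = B_FALSE,	/* specific vdevs were named */
		.secure = B_FALSE,
		.wait = B_TRUE,		/* block until the TRIM finishes */
		.rate = 0,		/* no rate limit */
	};
	nvlist_t *vds = fnvlist_alloc();
	int err;

	fnvlist_add_boolean(vds, "sda");
	err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
	fnvlist_free(vds);
	return (err);
}
#endif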
2726
2727 /*
2728 * Scan the pool.
2729 */
2730 int
2731 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
2732 {
2733 char errbuf[ERRBUFLEN];
2734 int err;
2735 libzfs_handle_t *hdl = zhp->zpool_hdl;
2736
2737 nvlist_t *args = fnvlist_alloc();
2738 fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
2739 fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);
2740
2741 err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
2742 fnvlist_free(args);
2743
2744 if (err == 0) {
2745 return (0);
2746 } else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
2747 zfs_cmd_t zc = {"\0"};
2748 (void) strlcpy(zc.zc_name, zhp->zpool_name,
2749 sizeof (zc.zc_name));
2750 zc.zc_cookie = func;
2751 zc.zc_flags = cmd;
2752
2753 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
2754 return (0);
2755 }
2756
2757 /*
2758 * An ECANCELED on a scrub means one of the following:
2759 * 1. we resumed a paused scrub.
2760 * 2. we resumed a paused error scrub.
2761 * 3. Error scrub is not run because of no error log.
2762 */
2763 if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
2764 func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
2765 return (0);
2766 /*
2767 * The following case is also handled here:
2768 * 1. Pausing a scrub/error scrub when there is none in progress.
2769 */
2770 if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
2771 POOL_SCRUB_PAUSE) {
2772 return (0);
2773 }
2774
2775 ASSERT3U(func, >=, POOL_SCAN_NONE);
2776 ASSERT3U(func, <, POOL_SCAN_FUNCS);
2777
2778 if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
2779 if (cmd == POOL_SCRUB_PAUSE) {
2780 (void) snprintf(errbuf, sizeof (errbuf),
2781 dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
2782 zhp->zpool_name);
2783 } else {
2784 assert(cmd == POOL_SCRUB_NORMAL);
2785 (void) snprintf(errbuf, sizeof (errbuf),
2786 dgettext(TEXT_DOMAIN, "cannot scrub %s"),
2787 zhp->zpool_name);
2788 }
2789 } else if (func == POOL_SCAN_RESILVER) {
2790 assert(cmd == POOL_SCRUB_NORMAL);
2791 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2792 "cannot restart resilver on %s"), zhp->zpool_name);
2793 } else if (func == POOL_SCAN_NONE) {
2794 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2795 "cannot cancel scrubbing %s"), zhp->zpool_name);
2796 } else {
2797 assert(!"unexpected result");
2798 }
2799
2800 /*
2801 * With EBUSY, six cases are possible:
2802 *
2803 * Current state Requested
2804 * 1. Normal Scrub Running Normal Scrub or Error Scrub
2805 * 2. Normal Scrub Paused Error Scrub
2806 * 3. Normal Scrub Paused Pause Normal Scrub
2807 * 4. Error Scrub Running Normal Scrub or Error Scrub
2808 * 5. Error Scrub Paused Pause Error Scrub
2809 * 6. Resilvering Anything else
2810 */
2811 if (err == EBUSY) {
2812 nvlist_t *nvroot;
2813 pool_scan_stat_t *ps = NULL;
2814 uint_t psc;
2815
2816 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
2817 ZPOOL_CONFIG_VDEV_TREE);
2818 (void) nvlist_lookup_uint64_array(nvroot,
2819 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
2820 if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
2821 ps->pss_state == DSS_SCANNING) {
2822 if (ps->pss_pass_scrub_pause == 0) {
2823 /* handles case 1 */
2824 assert(cmd == POOL_SCRUB_NORMAL);
2825 return (zfs_error(hdl, EZFS_SCRUBBING,
2826 errbuf));
2827 } else {
2828 if (func == POOL_SCAN_ERRORSCRUB) {
2829 /* handles case 2 */
2830 ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
2831 return (zfs_error(hdl,
2832 EZFS_SCRUB_PAUSED_TO_CANCEL,
2833 errbuf));
2834 } else {
2835 /* handles case 3 */
2836 ASSERT3U(func, ==, POOL_SCAN_SCRUB);
2837 ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
2838 return (zfs_error(hdl,
2839 EZFS_SCRUB_PAUSED, errbuf));
2840 }
2841 }
2842 } else if (ps &&
2843 ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
2844 ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
2845 if (ps->pss_pass_error_scrub_pause == 0) {
2846 /* handles case 4 */
2847 ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
2848 return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
2849 errbuf));
2850 } else {
2851 /* handles case 5 */
2852 ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
2853 ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
2854 return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
2855 errbuf));
2856 }
2857 } else {
2858 /* handles case 6 */
2859 return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
2860 }
2861 } else if (err == ENOENT) {
2862 return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
2863 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
2864 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
2865 } else {
2866 return (zpool_standard_error(hdl, err, errbuf));
2867 }
2868 }
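
/*
 * Illustrative sketch, not part of the original source: starting and
 * later pausing a scrub go through this same entry point, as the
 * EBUSY/ECANCELED handling above implies.
 */
#if 0
static int
scrub_example(zpool_handle_t *zhp)
{
	/* Start (or resume) a normal scrub. */
	if (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL) != 0)
		return (-1);

	/* A later pause request uses the same function and scan type. */
	return (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE));
}
#endif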
2869
2870 /*
2871 * Find a vdev that matches the specified search criteria. We use the
2872 * nvpair name to determine how we should look for the device.
2873 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2874 * spare, but FALSE if it's an INUSE spare.
2875 *
2876 * If 'return_parent' is set, then return the *parent* of the vdev you're
2877 * searching for rather than the vdev itself.
2878 */
2879 static nvlist_t *
2880 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2881 boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
2882 {
2883 uint_t c, children;
2884 nvlist_t **child;
2885 nvlist_t *ret;
2886 uint64_t is_log;
2887 const char *srchkey;
2888 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2889 const char *tmp = NULL;
2890 boolean_t is_root;
2891
2892 /* Nothing to look for */
2893 if (search == NULL || pair == NULL)
2894 return (NULL);
2895
2896 /* Obtain the key we will use to search */
2897 srchkey = nvpair_name(pair);
2898
2899 nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
2900 if (strcmp(tmp, "root") == 0)
2901 is_root = B_TRUE;
2902 else
2903 is_root = B_FALSE;
2904
2905 switch (nvpair_type(pair)) {
2906 case DATA_TYPE_UINT64:
2907 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2908 uint64_t srchval = fnvpair_value_uint64(pair);
2909 uint64_t theguid = fnvlist_lookup_uint64(nv,
2910 ZPOOL_CONFIG_GUID);
2911 if (theguid == srchval)
2912 return (nv);
2913 }
2914 break;
2915
2916 case DATA_TYPE_STRING: {
2917 const char *srchval, *val;
2918
2919 srchval = fnvpair_value_string(pair);
2920 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2921 break;
2922
2923 /*
2924 * Search for the requested value. Special cases:
2925 *
2926 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2927 * "-part1" or "p1". The suffix is hidden from the user,
2928 * but included in the string, so this matches around it.
2929 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2930 * is used to check all possible expanded paths.
2931 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2932 *
2933 * Otherwise, all other searches are simple string compares.
2934 */
2935 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2936 uint64_t wholedisk = 0;
2937
2938 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2939 &wholedisk);
2940 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2941 return (nv);
2942
2943 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
2944 char *type, *idx, *end, *p;
2945 uint64_t id, vdev_id;
2946
2947 /*
2948 * Determine our vdev type, keeping in mind
2949 * that the srchval is composed of a type and
2950 * vdev id pair (e.g. mirror-4).
2951 */
2952 if ((type = strdup(srchval)) == NULL)
2953 return (NULL);
2954
2955 if ((p = strrchr(type, '-')) == NULL) {
2956 free(type);
2957 break;
2958 }
2959 idx = p + 1;
2960 *p = '\0';
2961
2962 /*
2963 * If the types don't match then keep looking.
2964 */
2965 if (strncmp(val, type, strlen(val)) != 0) {
2966 free(type);
2967 break;
2968 }
2969
2970 verify(zpool_vdev_is_interior(type));
2971
2972 id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
2973 errno = 0;
2974 vdev_id = strtoull(idx, &end, 10);
2975
2976 /*
2977 * If we are looking for a raidz and a parity is
2978 * specified, make sure it matches.
2979 */
2980 int rzlen = strlen(VDEV_TYPE_RAIDZ);
2981 assert(rzlen == strlen(VDEV_TYPE_DRAID));
2982 int typlen = strlen(type);
2983 if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
2984 strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
2985 typlen != rzlen) {
2986 uint64_t vdev_parity;
2987 int parity = *(type + rzlen) - '0';
2988
2989 if (parity <= 0 || parity > 3 ||
2990 (typlen - rzlen) != 1) {
2991 /*
2992 * Nonsense parity specified, can
2993 * never match
2994 */
2995 free(type);
2996 return (NULL);
2997 }
2998 vdev_parity = fnvlist_lookup_uint64(nv,
2999 ZPOOL_CONFIG_NPARITY);
3000 if ((int)vdev_parity != parity) {
3001 free(type);
3002 break;
3003 }
3004 }
3005
3006 free(type);
3007 if (errno != 0)
3008 return (NULL);
3009
3010 /*
3011 * Now verify that we have the correct vdev id.
3012 */
3013 if (vdev_id == id)
3014 return (nv);
3015 }
3016
3017 /*
3018 * Common case
3019 */
3020 if (strcmp(srchval, val) == 0)
3021 return (nv);
3022 break;
3023 }
3024
3025 default:
3026 break;
3027 }
3028
3029 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3030 &child, &children) != 0)
3031 return (NULL);
3032
3033 for (c = 0; c < children; c++) {
3034 if ((ret = vdev_to_nvlist_iter(child[c], search,
3035 avail_spare, l2cache, NULL, return_parent)) != NULL) {
3036 /*
3037 * The 'is_log' value is only set for the top-level
3038 * vdev, not the leaf vdevs. So we always look up the
3039 * log device from the root of the vdev tree (where
3040 * 'log' is non-NULL).
3041 */
3042 if (log != NULL &&
3043 nvlist_lookup_uint64(child[c],
3044 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
3045 is_log) {
3046 *log = B_TRUE;
3047 }
3048 return (ret && return_parent && !is_root ? nv : ret);
3049 }
3050 }
3051
3052 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3053 &child, &children) == 0) {
3054 for (c = 0; c < children; c++) {
3055 if ((ret = vdev_to_nvlist_iter(child[c], search,
3056 avail_spare, l2cache, NULL, return_parent))
3057 != NULL) {
3058 *avail_spare = B_TRUE;
3059 return (ret && return_parent &&
3060 !is_root ? nv : ret);
3061 }
3062 }
3063 }
3064
3065 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3066 &child, &children) == 0) {
3067 for (c = 0; c < children; c++) {
3068 if ((ret = vdev_to_nvlist_iter(child[c], search,
3069 avail_spare, l2cache, NULL, return_parent))
3070 != NULL) {
3071 *l2cache = B_TRUE;
3072 return (ret && return_parent &&
3073 !is_root ? nv : ret);
3074 }
3075 }
3076 }
3077
3078 return (NULL);
3079 }
3080
3081 /*
3082 * Given a physical path or guid, find the associated vdev.
3083 */
3084 nvlist_t *
3085 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
3086 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3087 {
3088 nvlist_t *search, *nvroot, *ret;
3089 uint64_t guid;
3090 char *end;
3091
3092 search = fnvlist_alloc();
3093
3094 guid = strtoull(ppath, &end, 0);
3095 if (guid != 0 && *end == '\0') {
3096 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3097 } else {
3098 fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
3099 }
3100
3101 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3102 ZPOOL_CONFIG_VDEV_TREE);
3103
3104 *avail_spare = B_FALSE;
3105 *l2cache = B_FALSE;
3106 if (log != NULL)
3107 *log = B_FALSE;
3108 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3109 B_FALSE);
3110 fnvlist_free(search);
3111
3112 return (ret);
3113 }
3114
3115 /*
3116 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
3117 */
3118 static boolean_t
3119 zpool_vdev_is_interior(const char *name)
3120 {
3121 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
3122 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
3123 strncmp(name,
3124 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
3125 strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
3126 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
3127 return (B_TRUE);
3128
3129 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
3130 !zpool_is_draid_spare(name))
3131 return (B_TRUE);
3132
3133 return (B_FALSE);
3134 }
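
/*
 * Illustrative examples, not part of the original source (guarded so
 * they never compile): prefix matching makes names such as "mirror-0"
 * and "raidz2-1" interior, while a leaf path such as "sda" is not;
 * dRAID distributed spares are excluded by the zpool_is_draid_spare()
 * check above.
 */
#if 0
	assert(zpool_vdev_is_interior("mirror-0"));
	assert(zpool_vdev_is_interior("raidz2-1"));
	assert(!zpool_vdev_is_interior("sda"));
#endif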
3135
3136 /*
3137 * Lookup the nvlist for a given vdev or vdev's parent (depending on
3138 * if 'return_parent' is set).
3139 */
3140 static nvlist_t *
3141 __zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3142 boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
3143 {
3144 char *end;
3145 nvlist_t *nvroot, *search, *ret;
3146 uint64_t guid;
3147 boolean_t __avail_spare, __l2cache, __log;
3148
3149 search = fnvlist_alloc();
3150
3151 guid = strtoull(path, &end, 0);
3152 if (guid != 0 && *end == '\0') {
3153 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3154 } else if (zpool_vdev_is_interior(path)) {
3155 fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
3156 } else {
3157 fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
3158 }
3159
3160 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3161 ZPOOL_CONFIG_VDEV_TREE);
3162
3163 /*
3164 * User can pass NULL for avail_spare, l2cache, and log, but
3165 * we still need to provide variables to vdev_to_nvlist_iter(), so
3166 * just point them to junk variables here.
3167 */
3168 if (!avail_spare)
3169 avail_spare = &__avail_spare;
3170 if (!l2cache)
3171 l2cache = &__l2cache;
3172 if (!log)
3173 log = &__log;
3174
3175 *avail_spare = B_FALSE;
3176 *l2cache = B_FALSE;
3177 if (log != NULL)
3178 *log = B_FALSE;
3179 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3180 return_parent);
3181 fnvlist_free(search);
3182
3183 return (ret);
3184 }
3185
3186 nvlist_t *
3187 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3188 boolean_t *l2cache, boolean_t *log)
3189 {
3190 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3191 B_FALSE));
3192 }
3193
3194 /* Given a vdev path, return its parent's nvlist */
3195 nvlist_t *
3196 zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,
3197 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3198 {
3199 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3200 B_TRUE));
3201 }
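
/*
 * Illustrative sketch, not part of the original source: looking up a
 * vdev by name and checking which class it belongs to. The vdev name
 * is a placeholder; any of the boolean outputs may be passed as NULL.
 */
#if 0
static void
find_vdev_example(zpool_handle_t *zhp)
{
	boolean_t spare, l2cache, log;
	nvlist_t *nv;

	nv = zpool_find_vdev(zhp, "sda", &spare, &l2cache, &log);
	if (nv != NULL && !spare && !l2cache)
		(void) printf("found a %s vdev\n", log ? "log" : "data");
}
#endif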
3202
3203 /*
3204 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
3205 *
3206 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
3207 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
3208 * ignore them.
3209 */
3210 static uint64_t
3211 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
3212 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
3213 {
3214 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
3215 nvlist_t *tgt;
3216
3217 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
3218 &log)) == NULL)
3219 return (0);
3220
3221 if (is_spare != NULL)
3222 *is_spare = spare;
3223 if (is_l2cache != NULL)
3224 *is_l2cache = l2cache;
3225 if (is_log != NULL)
3226 *is_log = log;
3227
3228 return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
3229 }
3230
3231 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
3232 uint64_t
3233 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
3234 {
3235 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
3236 }
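
/*
 * Illustrative sketch, not part of the original source: converting a
 * device path to its pool GUID. The path is a placeholder; a return
 * of 0 means the lookup failed.
 */
#if 0
static void
guid_example(zpool_handle_t *zhp)
{
	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda");

	if (guid == 0)
		(void) printf("no such vdev\n");
	else
		(void) printf("guid: %llu\n", (u_longlong_t)guid);
}
#endif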
3237
3238 /*
3239 * Bring the specified vdev online. The 'flags' parameter is a set of the
3240 * ZFS_ONLINE_* flags.
3241 */
3242 int
3243 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
3244 vdev_state_t *newstate)
3245 {
3246 zfs_cmd_t zc = {"\0"};
3247 char errbuf[ERRBUFLEN];
3248 nvlist_t *tgt;
3249 boolean_t avail_spare, l2cache, islog;
3250 libzfs_handle_t *hdl = zhp->zpool_hdl;
3251
3252 if (flags & ZFS_ONLINE_EXPAND) {
3253 (void) snprintf(errbuf, sizeof (errbuf),
3254 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
3255 } else {
3256 (void) snprintf(errbuf, sizeof (errbuf),
3257 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
3258 }
3259
3260 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3261 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3262 &islog)) == NULL)
3263 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3264
3265 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3266
3267 if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
3268 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3269
3270 #ifndef __FreeBSD__
3271 const char *pathname;
3272 if ((flags & ZFS_ONLINE_EXPAND ||
3273 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
3274 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
3275 uint64_t wholedisk = 0;
3276
3277 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
3278 &wholedisk);
3279
3280 /*
3281 * XXX - L2ARC 1.0 devices can't support expansion.
3282 */
3283 if (l2cache) {
3284 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3285 "cannot expand cache devices"));
3286 return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
3287 }
3288
3289 if (wholedisk) {
3290 const char *fullpath = path;
3291 char buf[MAXPATHLEN];
3292 int error;
3293
3294 if (path[0] != '/') {
3295 error = zfs_resolve_shortname(path, buf,
3296 sizeof (buf));
3297 if (error != 0)
3298 return (zfs_error(hdl, EZFS_NODEVICE,
3299 errbuf));
3300
3301 fullpath = buf;
3302 }
3303
3304 error = zpool_relabel_disk(hdl, fullpath, errbuf);
3305 if (error != 0)
3306 return (error);
3307 }
3308 }
3309 #endif
3310
3311 zc.zc_cookie = VDEV_STATE_ONLINE;
3312 zc.zc_obj = flags;
3313
3314 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
3315 if (errno == EINVAL) {
3316 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
3317 "from this pool into a new one. Use '%s' "
3318 "instead"), "zpool detach");
3319 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
3320 }
3321 return (zpool_standard_error(hdl, errno, errbuf));
3322 }
3323
3324 *newstate = zc.zc_cookie;
3325 return (0);
3326 }
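
/*
 * Illustrative sketch, not part of the original source: onlining a
 * device and requesting expansion in one call. The device name is a
 * placeholder.
 */
#if 0
static int
online_example(zpool_handle_t *zhp)
{
	vdev_state_t newstate;
	int err = zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND, &newstate);

	if (err == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) printf("device onlined, but not healthy\n");

	return (err);
}
#endif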
3327
3328 /*
3329 * Take the specified vdev offline
3330 */
3331 int
3332 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
3333 {
3334 zfs_cmd_t zc = {"\0"};
3335 char errbuf[ERRBUFLEN];
3336 nvlist_t *tgt;
3337 boolean_t avail_spare, l2cache;
3338 libzfs_handle_t *hdl = zhp->zpool_hdl;
3339
3340 (void) snprintf(errbuf, sizeof (errbuf),
3341 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
3342
3343 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3344 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3345 NULL)) == NULL)
3346 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3347
3348 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3349
3350 if (avail_spare)
3351 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3352
3353 zc.zc_cookie = VDEV_STATE_OFFLINE;
3354 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3355
3356 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3357 return (0);
3358
3359 switch (errno) {
3360 case EBUSY:
3361
3362 /*
3363 * There are no other replicas of this device.
3364 */
3365 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
3366
3367 case EEXIST:
3368 /*
3369 * The log device has unplayed logs
3370 */
3371 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
3372
3373 default:
3374 return (zpool_standard_error(hdl, errno, errbuf));
3375 }
3376 }
3377
3378 /*
3379 * Remove the specified vdev asynchronously from the configuration, so
3380 * that it may come ONLINE if reinserted. This is called from zed on a
3381 * udev remove event.
3382 * Note: We also have a similar function zpool_vdev_remove() that
3383 * removes the vdev from the pool.
3384 */
3385 int
3386 zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
3387 {
3388 zfs_cmd_t zc = {"\0"};
3389 char errbuf[ERRBUFLEN];
3390 nvlist_t *tgt;
3391 boolean_t avail_spare, l2cache;
3392 libzfs_handle_t *hdl = zhp->zpool_hdl;
3393
3394 (void) snprintf(errbuf, sizeof (errbuf),
3395 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3396
3397 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3398 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3399 NULL)) == NULL)
3400 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3401
3402 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3403
3404 zc.zc_cookie = VDEV_STATE_REMOVED;
3405
3406 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3407 return (0);
3408
3409 return (zpool_standard_error(hdl, errno, errbuf));
3410 }
3411
3412 /*
3413 * Mark the given vdev faulted.
3414 */
3415 int
3416 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3417 {
3418 zfs_cmd_t zc = {"\0"};
3419 char errbuf[ERRBUFLEN];
3420 libzfs_handle_t *hdl = zhp->zpool_hdl;
3421
3422 (void) snprintf(errbuf, sizeof (errbuf),
3423 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3424
3425 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3426 zc.zc_guid = guid;
3427 zc.zc_cookie = VDEV_STATE_FAULTED;
3428 zc.zc_obj = aux;
3429
3430 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3431 return (0);
3432
3433 switch (errno) {
3434 case EBUSY:
3435
3436 /*
3437 * There are no other replicas of this device.
3438 */
3439 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
3440
3441 default:
3442 return (zpool_standard_error(hdl, errno, errbuf));
3443 }
3444
3445 }
3446
3447 /*
3448 * Generic set vdev state function
3449 */
3450 static int
3451 zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
3452 vdev_state_t state)
3453 {
3454 zfs_cmd_t zc = {"\0"};
3455 char errbuf[ERRBUFLEN];
3456 libzfs_handle_t *hdl = zhp->zpool_hdl;
3457
3458 (void) snprintf(errbuf, sizeof (errbuf),
3459 dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
3460 zpool_state_to_name(state, aux), (u_longlong_t)guid);
3461
3462 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3463 zc.zc_guid = guid;
3464 zc.zc_cookie = state;
3465 zc.zc_obj = aux;
3466
3467 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3468 return (0);
3469
3470 return (zpool_standard_error(hdl, errno, errbuf));
3471 }
3472
3473 /*
3474 * Mark the given vdev degraded.
3475 */
3476 int
3477 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3478 {
3479 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
3480 }
3481
3482 /*
3483 * Mark the given vdev as in a removed state (as if the device does not exist).
3484 *
3485 * This is different than zpool_vdev_remove() which does a removal of a device
3486 * from the pool (but the device does exist).
3487 */
3488 int
3489 zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3490 {
3491 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
3492 }
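
/*
 * Illustrative sketch, not part of the original source: the fault and
 * degrade entry points take a vdev guid rather than a path, so a
 * caller typically resolves the path first. VDEV_AUX_ERR_EXCEEDED is
 * assumed here as a representative aux reason.
 */
#if 0
static void
fault_example(zpool_handle_t *zhp, const char *path)
{
	uint64_t guid = zpool_vdev_path_to_guid(zhp, path);

	if (guid != 0)
		(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_ERR_EXCEEDED);
}
#endif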
3493
3494 /*
3495 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3496 * a hot spare.
3497 */
3498 static boolean_t
3499 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3500 {
3501 nvlist_t **child;
3502 uint_t c, children;
3503
3504 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3505 &children) == 0) {
3506 const char *type = fnvlist_lookup_string(search,
3507 ZPOOL_CONFIG_TYPE);
3508 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
3509 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
3510 children == 2 && child[which] == tgt)
3511 return (B_TRUE);
3512
3513 for (c = 0; c < children; c++)
3514 if (is_replacing_spare(child[c], tgt, which))
3515 return (B_TRUE);
3516 }
3517
3518 return (B_FALSE);
3519 }
3520
3521 /*
3522 * Attach new_disk (fully described by nvroot) to old_disk.
3523 * If 'replacing' is specified, the new disk will replace the old one.
3524 */
3525 int
3526 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
3527 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
3528 {
3529 zfs_cmd_t zc = {"\0"};
3530 char errbuf[ERRBUFLEN];
3531 int ret;
3532 nvlist_t *tgt;
3533 boolean_t avail_spare, l2cache, islog;
3534 uint64_t val;
3535 char *newname;
3536 const char *type;
3537 nvlist_t **child;
3538 uint_t children;
3539 nvlist_t *config_root;
3540 libzfs_handle_t *hdl = zhp->zpool_hdl;
3541
3542 if (replacing)
3543 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3544 "cannot replace %s with %s"), old_disk, new_disk);
3545 else
3546 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3547 "cannot attach %s to %s"), new_disk, old_disk);
3548
3549 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3550 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3551 &islog)) == NULL)
3552 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3553
3554 if (avail_spare)
3555 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3556
3557 if (l2cache)
3558 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
3559
3560 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3561 zc.zc_cookie = replacing;
3562 zc.zc_simple = rebuild;
3563
3564 if (rebuild &&
3565 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
3566 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3567 "the loaded zfs module doesn't support device rebuilds"));
3568 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3569 }
3570
3571 type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);
3572 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&
3573 zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {
3574 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3575 "the loaded zfs module doesn't support raidz expansion"));
3576 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3577 }
3578
3579 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3580 &child, &children) != 0 || children != 1) {
3581 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3582 "new device must be a single disk"));
3583 return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
3584 }
3585
3586 config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3587 ZPOOL_CONFIG_VDEV_TREE);
3588
3589 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3590 return (-1);
3591
3592 /*
3593 * If the target is a hot spare that has been swapped in, we can only
3594 * replace it with another hot spare.
3595 */
3596 if (replacing &&
3597 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3598 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3599 NULL) == NULL || !avail_spare) &&
3600 is_replacing_spare(config_root, tgt, 1)) {
3601 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3602 "can only be replaced by another hot spare"));
3603 free(newname);
3604 return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
3605 }
3606
3607 free(newname);
3608
3609 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
3610
3611 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3612
3613 zcmd_free_nvlists(&zc);
3614
3615 if (ret == 0)
3616 return (0);
3617
3618 switch (errno) {
3619 case ENOTSUP:
3620 /*
3621 * Can't attach to or replace this type of vdev.
3622 */
3623 if (replacing) {
3624 uint64_t version = zpool_get_prop_int(zhp,
3625 ZPOOL_PROP_VERSION, NULL);
3626
3627 if (islog) {
3628 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3629 "cannot replace a log with a spare"));
3630 } else if (rebuild) {
3631 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3632 "only mirror and dRAID vdevs support "
3633 "sequential reconstruction"));
3634 } else if (zpool_is_draid_spare(new_disk)) {
3635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3636 "dRAID spares can only replace child "
3637 "devices in their parent's dRAID vdev"));
3638 } else if (version >= SPA_VERSION_MULTI_REPLACE) {
3639 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3640 "already in replacing/spare config; wait "
3641 "for completion or use 'zpool detach'"));
3642 } else {
3643 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3644 "cannot replace a replacing device"));
3645 }
3646 } else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3647 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3648 "raidz_expansion feature must be enabled "
3649 "in order to attach a device to raidz"));
3650 } else {
3651 char status[64] = {0};
3652 zpool_prop_get_feature(zhp,
3653 "feature@device_rebuild", status, 63);
3654 if (rebuild &&
3655 strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
3656 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3657 "device_rebuild feature must be enabled "
3658 "in order to use sequential "
3659 "reconstruction"));
3660 } else {
3661 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3662 "can only attach to mirrors and top-level "
3663 "disks"));
3664 }
3665 }
3666 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3667 break;
3668
3669 case EINVAL:
3670 /*
3671 * The new device must be a single disk.
3672 */
3673 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3674 "new device must be a single disk"));
3675 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3676 break;
3677
3678 case EBUSY:
3679 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
3680 new_disk);
3681 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3682 break;
3683
3684 case EOVERFLOW:
3685 /*
3686 * The new device is too small.
3687 */
3688 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3689 "device is too small"));
3690 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3691 break;
3692
3693 case EDOM:
3694 /*
3695 * The new device has a different optimal sector size.
3696 */
3697 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3698 "new device has a different optimal sector size; use the "
3699 "option '-o ashift=N' to override the optimal size"));
3700 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3701 break;
3702
3703 case ENAMETOOLONG:
3704 /*
3705 * The resulting top-level vdev spec won't fit in the label.
3706 */
3707 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3708 break;
3709
3710 case ENXIO:
3711 /*
3712 * The existing raidz vdev has offline children
3713 */
3714 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3715 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3716 "raidz vdev has devices that are offline or "
3717 "being replaced"));
3718 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3719 break;
3720 } else {
3721 (void) zpool_standard_error(hdl, errno, errbuf);
3722 }
3723 break;
3724
3725 case EADDRINUSE:
3726 /*
3727 * The boot reserved area is already being used (FreeBSD)
3728 */
3729 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3730 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3731 "the reserved boot area needed for the expansion "
3732 "is already being used by a boot loader"));
3733 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3734 } else {
3735 (void) zpool_standard_error(hdl, errno, errbuf);
3736 }
3737 break;
3738
3739 case ZFS_ERR_ASHIFT_MISMATCH:
3740 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3741 "The new device cannot have a higher alignment requirement "
3742 "than the top-level vdev."));
3743 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3744 break;
3745 default:
3746 (void) zpool_standard_error(hdl, errno, errbuf);
3747 }
3748
3749 return (-1);
3750 }
3751
3752 /*
3753 * Detach the specified device.
3754 */
3755 int
3756 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3757 {
3758 zfs_cmd_t zc = {"\0"};
3759 char errbuf[ERRBUFLEN];
3760 nvlist_t *tgt;
3761 boolean_t avail_spare, l2cache;
3762 libzfs_handle_t *hdl = zhp->zpool_hdl;
3763
3764 (void) snprintf(errbuf, sizeof (errbuf),
3765 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3766
3767 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3768 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3769 NULL)) == NULL)
3770 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3771
3772 if (avail_spare)
3773 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3774
3775 if (l2cache)
3776 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
3777
3778 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3779
3780 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3781 return (0);
3782
3783 switch (errno) {
3784
3785 case ENOTSUP:
3786 /*
3787 * Can't detach from this type of vdev.
3788 */
3789 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3790 "applicable to mirror and replacing vdevs"));
3791 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3792 break;
3793
3794 case EBUSY:
3795 /*
3796 * There are no other replicas of this device.
3797 */
3798 (void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
3799 break;
3800
3801 default:
3802 (void) zpool_standard_error(hdl, errno, errbuf);
3803 }
3804
3805 return (-1);
3806 }
3807
3808 /*
3809 * Find a mirror vdev in the source nvlist.
3810 *
3811 * The mchild array contains a list of disks in one of the top-level mirrors
3812 * of the source pool. The schild array contains a list of disks that the
3813 * user specified on the command line. We loop over the mchild array to
3814 * see if any entry in the schild array matches.
3815 *
3816 * If a disk in the mchild array is found in the schild array, we return
3817 * the index of that entry. Otherwise we return -1.
3818 */
3819 static int
3820 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3821 nvlist_t **schild, uint_t schildren)
3822 {
3823 uint_t mc;
3824
3825 for (mc = 0; mc < mchildren; mc++) {
3826 uint_t sc;
3827 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3828 mchild[mc], 0);
3829
3830 for (sc = 0; sc < schildren; sc++) {
3831 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3832 schild[sc], 0);
3833 boolean_t result = (strcmp(mpath, spath) == 0);
3834
3835 free(spath);
3836 if (result) {
3837 free(mpath);
3838 return (mc);
3839 }
3840 }
3841
3842 free(mpath);
3843 }
3844
3845 return (-1);
3846 }

/*
 * Split a mirror pool. If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	const char *bias;
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers, readonly = B_FALSE;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
	vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, errbuf)) == NULL)
			return (-1);
		(void) nvlist_lookup_uint64(zc_props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
		if (readonly) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property %s can only be set at import time"),
			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
			return (-1);
		}
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
		const char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);

		if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
			vdev = child[c];
			if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
				goto out;
			continue;
		} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
			goto out;
		}

		if (nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
				is_special = B_TRUE;
			else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
				is_dedup = B_TRUE;
		}
		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;

		if (flags.dryrun != 0) {
			if (is_dedup == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_DEDUP) != 0)
					goto out;
			} else if (is_special == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_SPECIAL) != 0)
					goto out;
			}
		}
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	zcmd_write_conf_nvlist(hdl, &zc, newconfig);
	if (zc_props != NULL)
		zcmd_write_src_nvlist(hdl, &zc, zc_props);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, errbuf);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
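
/*
 * Illustrative sketch (not part of the original source): a caller can
 * preview a split with the dryrun flag before committing to it. The
 * handle 'zhp' is assumed to come from zpool_open(), and "newpool" is a
 * hypothetical name for the pool being split off:
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		(inspect 'newroot' to see which disks would move)
 *		nvlist_free(newroot);
 *	}
 */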

/*
 * Remove the given device.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	if (zpool_is_draid_spare(path)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dRAID spares cannot be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	switch (errno) {

	case EALREADY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "removal for this vdev is already in progress."));
		(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		break;

	case EINVAL:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "invalid config; all top-level vdevs must "
		    "have the same sector size and not be raidz."));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		break;

	case EBUSY:
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Pool busy; removal may already be in progress"));
		}
		(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		break;

	case EACCES:
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
			(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;

	default:
		(void) zpool_standard_error(hdl, errno, errbuf);
	}
	return (-1);
}

int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot cancel removal"));

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = 1;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, errbuf));
}

int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
    uint64_t *sizep)
{
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
	    path);

	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	if (avail_spare || l2cache || islog) {
		*sizep = 0;
		return (0);
	}

	if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "indirect size not available"));
		return (zfs_error(hdl, EINVAL, errbuf));
	}
	return (0);
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	zpool_load_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

		/*
		 * Don't allow error clearing for hot spares. Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, errbuf));

		zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
	}

	zpool_get_load_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zlp_rewind;

	zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
	zcmd_write_src_nvlist(hdl, &zc, rewindnvl);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM)
		zcmd_expand_dst_nvlist(hdl, &zc);

	if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, errbuf));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, errbuf));
}

/*
 * Change the GUID for a pool.
 *
 * Similar to zpool_reguid(), but may take a GUID.
 *
 * If the guid argument is NULL, then no GUID is passed in the nvlist to the
 * ioctl().
 */
int
zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid)
{
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvl = NULL;
	zfs_cmd_t zc = {"\0"};
	int error = -1;

	if (guid != NULL) {
		if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
			return (no_memory(hdl));

		if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) {
			nvlist_free(nvl);
			return (no_memory(hdl));
		}

		zcmd_write_src_nvlist(hdl, &zc, nvl);
	}

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc);
	if (error != 0)
		error = zpool_standard_error(hdl, errno, errbuf);
	/* free the nvlists on both the success and the error paths */
	if (guid != NULL) {
		zcmd_free_nvlists(&zc);
		nvlist_free(nvl);
	}
	return (error);
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	return (zpool_set_guid(zhp, NULL));
}
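
/*
 * Usage sketch (an illustration added here, not upstream text): pass an
 * explicit GUID to zpool_set_guid(), or use zpool_reguid() to let the
 * kernel generate a random one. The value below is only an example.
 *
 *	uint64_t new_guid = 0x0123456789abcdefULL;
 *
 *	(void) zpool_set_guid(zhp, &new_guid);	(set a specific GUID)
 *	(void) zpool_reguid(zhp);		(pick a random GUID)
 */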

/*
 * Reopen the pool.
 */
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *scrub_restart = data;
	int error;

	error = lzc_reopen(pool_name, *scrub_restart);
	if (error) {
		return (zpool_standard_error_fmt(hdl, error,
		    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
	}

	return (0);
}

/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
	int ret;
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *force = data;
	nvlist_t *innvl = fnvlist_alloc();

	fnvlist_add_boolean_value(innvl, "force", *force);
	if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
		nvlist_free(innvl);
		return (zpool_standard_error_fmt(hdl, ret,
		    dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
	}
	nvlist_free(innvl);

	return (0);
}

#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case).
 * If the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev. If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	const char *type, *tpath;
	const char *path;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN * 2];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
		name_flags |= VDEV_NAME_PATH;
	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
		name_flags |= VDEV_NAME_GUID;
	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
		path = tpath;

		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = zfs_strip_path(path);
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
		    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity
		 * level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			value = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_NPARITY);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * If it's a dRAID device, we add parity, groups, and spares.
		 */
		if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
			uint64_t ndata, nparity, nspares;
			nvlist_t **child;
			uint_t children;

			verify(nvlist_lookup_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
			nparity = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_NPARITY);
			ndata = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NDATA);
			nspares = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NSPARES);

			path = zpool_draid_name(buf, sizeof (buf), ndata,
			    nparity, nspares, children);
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_ID);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
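
/*
 * Illustrative sketch (not in the original source): callers typically pair
 * zpool_vdev_name() with the name flags above; the returned string is
 * allocated and must be freed. 'nv' is assumed to be a vdev nvlist taken
 * from the pool config.
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nv,
 *	    VDEV_NAME_PATH | VDEV_NAME_TYPE_ID);
 *	(void) printf("%s\n", name);
 *	free(name);
 */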

static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

void
zpool_add_propname(zpool_handle_t *zhp, const char *propname)
{
	assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES);
	zhp->zpool_propnames[zhp->zpool_n_propnames] = propname;
	zhp->zpool_n_propnames++;
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zbookmark_phys_t *buf;
	uint64_t buflen = 10000;	/* approx. 1MB of RAM */

	if (fnvlist_lookup_uint64(zhp->zpool_config,
	    ZPOOL_CONFIG_ERRCOUNT) == 0)
		return (0);

	/*
	 * Retrieve the raw error list from the kernel. If it doesn't fit,
	 * allocate a larger buffer and retry.
	 */
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		buf = zfs_alloc(zhp->zpool_hdl,
		    buflen * sizeof (zbookmark_phys_t));
		zc.zc_nvlist_dst = (uintptr_t)buf;
		zc.zc_nvlist_dst_size = buflen;
		if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free(buf);
			if (errno == ENOMEM) {
				buflen *= 2;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
	uint64_t zblen = buflen - zc.zc_nvlist_dst_size;

	qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (uint64_t i = 0; i < zblen; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free(buf);
	return (0);

nomem:
	free(buf);
	return (no_memory(zhp->zpool_hdl));
}
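
/*
 * Illustrative sketch (not in the original source): walking the error log
 * returned above. Note that when the pool reports no errors the function
 * returns 0 without touching '*nverrlistp', hence the NULL initialization.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv = fnvpair_value_nvlist(elem);
 *			uint64_t ds = fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET);
 *			uint64_t obj = fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT);
 *			(void) printf("<0x%llx>:<0x%llx>\n",
 *			    (u_longlong_t)ds, (u_longlong_t)obj);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */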

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, zfs_basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	zcmd_write_src_nvlist(hdl, &zc, args);
	int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for "
			    "pool '%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for "
			    "pool '%s', pool must be upgraded"),
			    zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
    boolean_t *eof)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char *buf;
	int buflen = 128 * 1024;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err = 0, i;
	uint64_t start = *off;

	buf = zfs_alloc(hdl, buflen);

	/* process about 1MiB at a time */
	while (*off - start < 1024 * 1024) {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read) {
			*eof = B_TRUE;
			break;
		}

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0) {
			zpool_standard_error_fmt(hdl, err,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name);
			break;
		}
		*off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = zfs_alloc(hdl, buflen);
		}
	}

	free(buf);

	if (!err) {
		*nvhisp = fnvlist_alloc();
		fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    (const nvlist_t **)records, numrecords);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
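
/*
 * Illustrative sketch (not in the original source): reading the complete
 * history by calling zpool_get_history() until 'eof' is set. Each call
 * returns a batch of ZPOOL_HIST_RECORD nvlists and advances 'off'.
 *
 *	nvlist_t *nvhis;
 *	uint64_t off = 0;
 *	boolean_t eof = B_FALSE;
 *
 *	while (!eof) {
 *		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
 *			break;
 *		(process the ZPOOL_HIST_RECORD array in 'nvhis')
 *		nvlist_free(nvhis);
 *	}
 */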

/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function. When 'nvp' is set to NULL it indicates
 * no new events are available. In either case the function returns 0 and
 * it is up to the caller to free 'nvp'. In the case of a fatal error the
 * function will return a non-zero value. When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			zcmd_expand_dst_nvlist(hdl, &zc);
			goto retry;
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
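
/*
 * Illustrative sketch (not in the original source): draining all pending
 * events without blocking. 'zevent_fd' is assumed to be an open descriptor
 * on the ZFS control device, as used by the zpool events command.
 *
 *	nvlist_t *nvl = NULL;
 *	int dropped = 0;
 *
 *	for (;;) {
 *		if (zpool_events_next(hdl, &nvl, &dropped,
 *		    ZEVENT_NONBLOCK, zevent_fd) != 0)
 *			break;		(fatal error)
 *		if (nvl == NULL)
 *			break;		(queue drained)
 *		(inspect the event; 'dropped' counts missed events)
 *		nvlist_free(nvl);
 *	}
 */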

/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot clear events")));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle. On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}

static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}

void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}

/*
 * Wait while the specified activity is in progress in the pool.
 */
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
	boolean_t missing;

	int error = zpool_wait_status(zhp, activity, &missing, NULL);

	if (missing) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
		return (ENOENT);
	} else {
		return (error);
	}
}

/*
 * Wait for the given activity and return the status of the wait (whether or
 * not any waiting was done) in the 'waited' parameter. Non-existent pools
 * are reported via the 'missing' parameter, rather than by printing an error
 * message. This is convenient when this function is called in a loop over a
 * long period of time (as it is, for example, by zpool's wait cmd). In that
 * scenario, a pool being exported or destroyed should be considered a normal
 * event, so we don't want to print an error when we find that the pool
 * doesn't exist.
 */
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
    boolean_t *missing, boolean_t *waited)
{
	int error = lzc_wait(zhp->zpool_name, activity, waited);
	*missing = (error == ENOENT);
	if (*missing)
		return (0);

	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
	}

	return (error);
}
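
/*
 * Illustrative sketch (not in the original source): the loop shape used by
 * callers such as the zpool wait command, re-checking until no further
 * waiting was performed or the pool disappears.
 *
 *	boolean_t missing, waited;
 *
 *	do {
 *		if (zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB,
 *		    &missing, &waited) != 0 || missing)
 *			break;
 *	} while (waited);
 */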

int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
	int error = lzc_set_bootenv(zhp->zpool_name, envmap);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error setting bootenv in pool '%s'"), zhp->zpool_name);
	}

	return (error);
}

int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
	nvlist_t *nvl;
	int error;

	nvl = NULL;
	error = lzc_get_bootenv(zhp->zpool_name, &nvl);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error getting bootenv in pool '%s'"), zhp->zpool_name);
	} else {
		*nvlp = nvl;
	}

	return (error);
}

/*
 * Attempt to read and parse feature file(s) (from "compatibility" property).
 * Files contain zpool feature names, comma or whitespace-separated.
 * Comments (# character to next newline) are discarded.
 *
 * Arguments:
 *  compatibility : string containing feature filenames
 *  features : either NULL or pointer to array of booleans
 *  report : either NULL or pointer to string buffer
 *  rlen : length of "report" buffer
 *
 * compatibility is NULL (unset), "", "off", "legacy", or list of
 * comma-separated filenames. filenames should either be absolute,
 * or relative to:
 *   1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
 *   2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
 * (Unset), "" or "off" => enable all features
 * "legacy" => disable all features
 *
 * Any feature names read from files which match unames in spa_feature_table
 * will have the corresponding boolean set in the features array (if non-NULL).
 * If more than one feature set specified, only features present in *all* of
 * them will be set.
 *
 * "report" if not NULL will be populated with a suitable status message.
 *
 * Return values:
 *   ZPOOL_COMPATIBILITY_OK : files read and parsed ok
 *   ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
 *   ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
 *   ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
 *   ZPOOL_COMPATIBILITY_NOFILES : no feature files found
 */
zpool_compat_status_t
zpool_load_compat(const char *compat, boolean_t *features, char *report,
    size_t rlen)
{
	int sdirfd, ddirfd, featfd;
	struct stat fs;
	char *fc;
	char *ps, *ls, *ws;
	char *file, *line, *word;

	char l_compat[ZFS_MAXPROPLEN];

	boolean_t ret_nofiles = B_TRUE;
	boolean_t ret_badfile = B_FALSE;
	boolean_t ret_badtoken = B_FALSE;
	boolean_t ret_warntoken = B_FALSE;

	/* special cases (unset), "" and "off" => enable all features */
	if (compat == NULL || compat[0] == '\0' ||
	    strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
		if (features != NULL)
			for (uint_t i = 0; i < SPA_FEATURES; i++)
				features[i] = B_TRUE;
		if (report != NULL)
			strlcpy(report, gettext("all features enabled"), rlen);
		return (ZPOOL_COMPATIBILITY_OK);
	}

	/* Final special case "legacy" => disable all features */
	if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
		if (features != NULL)
			for (uint_t i = 0; i < SPA_FEATURES; i++)
				features[i] = B_FALSE;
		if (report != NULL)
			strlcpy(report, gettext("all features disabled"), rlen);
		return (ZPOOL_COMPATIBILITY_OK);
	}

	/*
	 * Start with all true; will be ANDed with results from each file
	 */
	if (features != NULL)
		for (uint_t i = 0; i < SPA_FEATURES; i++)
			features[i] = B_TRUE;

	char err_badfile[ZFS_MAXPROPLEN] = "";
	char err_badtoken[ZFS_MAXPROPLEN] = "";

	/*
	 * We ignore errors from the directory open()
	 * as they're only needed if the filename is relative
	 * which will be checked during the openat().
	 */

/* O_PATH safer than O_RDONLY if system allows it */
#if defined(O_PATH)
#define	ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
#else
#define	ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
#endif

	sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
	ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);

	(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);

	for (file = strtok_r(l_compat, ",", &ps);
	    file != NULL;
	    file = strtok_r(NULL, ",", &ps)) {

		boolean_t l_features[SPA_FEATURES];

		enum { Z_SYSCONF, Z_DATA } source;

		/* try sysconfdir first, then datadir */
		source = Z_SYSCONF;
		if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
			featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
			source = Z_DATA;
		}

		/* File readable and correct size? */
		if (featfd < 0 ||
		    fstat(featfd, &fs) < 0 ||
		    fs.st_size < 1 ||
		    fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
			(void) close(featfd);
			strlcat(err_badfile, file, ZFS_MAXPROPLEN);
			strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
			ret_badfile = B_TRUE;
			continue;
		}

/* Prefault the file if system allows */
#if defined(MAP_POPULATE)
#define	ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
#elif defined(MAP_PREFAULT_READ)
#define	ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
#else
#define	ZC_MMAP_FLAGS (MAP_PRIVATE)
#endif

		/* private mmap() so we can strtok safely */
		fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
		    ZC_MMAP_FLAGS, featfd, 0);
		(void) close(featfd);

		/* map ok, and last character == newline? */
		if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
			(void) munmap((void *)fc, fs.st_size);
			strlcat(err_badfile, file, ZFS_MAXPROPLEN);
			strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
			ret_badfile = B_TRUE;
			continue;
		}

		ret_nofiles = B_FALSE;

		for (uint_t i = 0; i < SPA_FEATURES; i++)
			l_features[i] = B_FALSE;

		/* replace final newline with NULL to ensure string ends */
		fc[fs.st_size - 1] = '\0';

		for (line = strtok_r(fc, "\n", &ls);
		    line != NULL;
		    line = strtok_r(NULL, "\n", &ls)) {
			/* discard comments */
			char *r = strchr(line, '#');
			if (r != NULL)
				*r = '\0';

			for (word = strtok_r(line, ", \t", &ws);
			    word != NULL;
			    word = strtok_r(NULL, ", \t", &ws)) {
				/* Find matching feature name */
				uint_t f;
				for (f = 0; f < SPA_FEATURES; f++) {
					zfeature_info_t *fi =
					    &spa_feature_table[f];
					if (strcmp(word, fi->fi_uname) == 0) {
						l_features[f] = B_TRUE;
						break;
					}
				}
				if (f < SPA_FEATURES)
					continue;

				/* found an unrecognized word */
				/* lightly sanitize it */
				if (strlen(word) > 32)
					word[32] = '\0';
				for (char *c = word; *c != '\0'; c++)
					if (!isprint(*c))
						*c = '?';

				strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
				strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
				if (source == Z_SYSCONF)
					ret_badtoken = B_TRUE;
				else
					ret_warntoken = B_TRUE;
			}
		}
		(void) munmap((void *)fc, fs.st_size);

		if (features != NULL)
			for (uint_t i = 0; i < SPA_FEATURES; i++)
				features[i] &= l_features[i];
	}
	(void) close(sdirfd);
	(void) close(ddirfd);

	/* Return the most serious error */
	if (ret_badfile) {
		if (report != NULL)
			snprintf(report, rlen, gettext("could not read/"
			    "parse feature file(s): %s"), err_badfile);
		return (ZPOOL_COMPATIBILITY_BADFILE);
	}
	if (ret_nofiles) {
		if (report != NULL)
			strlcpy(report,
			    gettext("no valid compatibility files specified"),
			    rlen);
		return (ZPOOL_COMPATIBILITY_NOFILES);
	}
	if (ret_badtoken) {
		if (report != NULL)
			snprintf(report, rlen, gettext("invalid feature "
			    "name(s) in local compatibility files: %s"),
			    err_badtoken);
		return (ZPOOL_COMPATIBILITY_BADTOKEN);
	}
	if (ret_warntoken) {
		if (report != NULL)
			snprintf(report, rlen, gettext("unrecognized feature "
			    "name(s) in distribution compatibility files: %s"),
			    err_badtoken);
		return (ZPOOL_COMPATIBILITY_WARNTOKEN);
	}
	if (report != NULL)
		strlcpy(report, gettext("compatibility set ok"), rlen);
	return (ZPOOL_COMPATIBILITY_OK);
}
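
/*
 * Illustrative sketch (not in the original source): validating a
 * compatibility setting. The filename is only an example; any file under
 * the directories listed above can be named.
 *
 *	boolean_t features[SPA_FEATURES];
 *	char report[1024];
 *
 *	if (zpool_load_compat("openzfs-2.1-linux", features, report,
 *	    sizeof (report)) != ZPOOL_COMPATIBILITY_OK)
 *		(void) fprintf(stderr, "%s\n", report);
 */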

static int
zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
{
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;

	verify(zhp != NULL);
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
	}

	if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
	    NULL)) == NULL) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot find %s in %s"),
		    vdevname, zhp->zpool_name);
		return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
	}

	*vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
	return (0);
}

/*
 * Get a vdev property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
    char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
	nvlist_t *nv;
	const char *strval;
	uint64_t intval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (prop == VDEV_PROP_USERPROP) {
		/* user property, prop_name must contain the property name */
		assert(prop_name != NULL);
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			/* user prop not found */
			return (-1);
		}
		(void) strlcpy(buf, strval, len);
		if (srctype)
			*srctype = src;
		return (0);
	}

	if (prop_name == NULL)
		prop_name = (char *)vdev_prop_to_name(prop);

	switch (vdev_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			src = ZPROP_SRC_DEFAULT;
			if ((strval = vdev_prop_default_string(prop)) == NULL)
				strval = "-";
		}
		(void) strlcpy(buf, strval, len);
		break;

	case PROP_TYPE_NUMBER:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		} else {
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
		}

		switch (prop) {
		case VDEV_PROP_ASIZE:
		case VDEV_PROP_PSIZE:
		case VDEV_PROP_SIZE:
		case VDEV_PROP_BOOTSIZE:
		case VDEV_PROP_ALLOCATED:
		case VDEV_PROP_FREE:
		case VDEV_PROP_READ_ERRORS:
		case VDEV_PROP_WRITE_ERRORS:
		case VDEV_PROP_CHECKSUM_ERRORS:
		case VDEV_PROP_INITIALIZE_ERRORS:
		case VDEV_PROP_TRIM_ERRORS:
		case VDEV_PROP_SLOW_IOS:
		case VDEV_PROP_OPS_NULL:
		case VDEV_PROP_OPS_READ:
		case VDEV_PROP_OPS_WRITE:
		case VDEV_PROP_OPS_FREE:
		case VDEV_PROP_OPS_CLAIM:
		case VDEV_PROP_OPS_TRIM:
		case VDEV_PROP_BYTES_NULL:
		case VDEV_PROP_BYTES_READ:
		case VDEV_PROP_BYTES_WRITE:
		case VDEV_PROP_BYTES_FREE:
		case VDEV_PROP_BYTES_CLAIM:
		case VDEV_PROP_BYTES_TRIM:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_CHECKSUM_N:
		case VDEV_PROP_CHECKSUM_T:
		case VDEV_PROP_IO_N:
		case VDEV_PROP_IO_T:
		case VDEV_PROP_SLOW_IO_N:
		case VDEV_PROP_SLOW_IO_T:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_STATE:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) strlcpy(buf, zpool_state_to_name(intval,
				    VDEV_AUX_NONE), len);
			}
			break;
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		} else {
			/* 'trim_support' only valid for leaf vdevs */
			if (prop == VDEV_PROP_TRIM_SUPPORT) {
				(void) strlcpy(buf, "-", len);
				break;
			}
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
			/* Only use if provided by the RAIDZ VDEV above */
			if (prop == VDEV_PROP_RAIDZ_EXPANDING)
				return (ENOENT);
		}
		if (vdev_prop_index_to_string(prop, intval,
		    (const char **)&strval) != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Get a vdev property value for 'prop_name' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
    vdev_prop_t prop, char *prop_name, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	nvlist_t *reqnvl, *reqprops;
	nvlist_t *retprops = NULL;
	uint64_t vdev_guid = 0;
	int ret;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));
	if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(reqnvl);
		return (no_memory(zhp->zpool_hdl));
	}

	fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);

	if (prop != VDEV_PROP_USERPROP) {
		/* prop_name overrides prop value */
		if (prop_name != NULL)
			prop = vdev_name_to_prop(prop_name);
		else
			prop_name = (char *)vdev_prop_to_name(prop);
		assert(prop < VDEV_NUM_PROPS);
	}

	assert(prop_name != NULL);
	if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
		nvlist_free(reqnvl);
		nvlist_free(reqprops);
		return (no_memory(zhp->zpool_hdl));
	}

	fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);

	ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);

	if (ret == 0) {
		ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
		    len, srctype, literal);
	} else {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
		    " %s in %s"), prop_name, vdevname, zhp->zpool_name);
		(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
	}

	nvlist_free(reqnvl);
	nvlist_free(reqprops);
	nvlist_free(retprops);

	return (ret);
}
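
/*
 * Illustrative sketch (not in the original source): fetching a single
 * numeric property for a leaf vdev. "sda" is a hypothetical vdev name.
 *
 *	char value[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_vdev_prop(zhp, "sda", VDEV_PROP_ASIZE, NULL,
 *	    value, sizeof (value), &src, B_FALSE) == 0)
 *		(void) printf("asize = %s\n", value);
 */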

/*
 * Get all vdev properties
 */
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
    nvlist_t **outnvl)
{
	nvlist_t *nvl = NULL;
	uint64_t vdev_guid = 0;
	int ret;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);

	ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);

	nvlist_free(nvl);

	if (ret) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
		    " %s in %s"), vdevname, zhp->zpool_name);
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	}

	return (ret);
}

/*
 * Set vdev property
 */
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
    const char *propname, const char *propval)
{
	int ret;
	nvlist_t *nvl = NULL;
	nvlist_t *outnvl = NULL;
	nvlist_t *props;
	nvlist_t *realprops;
	prop_flags_t flags = { 0 };
	uint64_t version;
	uint64_t vdev_guid;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));
	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);

	if (nvlist_add_string(props, propname, propval) != 0) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	char errbuf[ERRBUFLEN];
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
	    propname, vdevname, zhp->zpool_name);

	flags.vdevprop = 1;
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(props);
	props = realprops;

	fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);

	ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);

	nvlist_free(props);
	nvlist_free(nvl);
	nvlist_free(outnvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

	return (ret);
}
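
/*
 * Illustrative sketch (not in the original source): setting a vdev
 * property by name, as zpool set does. "sda" is a hypothetical vdev
 * name and checksum_n a tunable error threshold.
 *
 *	if (zpool_set_vdev_prop(zhp, "sda", "checksum_n", "10") != 0)
 *		(handle the error already reported via libzfs)
 */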

/*
 * Prune older entries from the DDT to reclaim space under the quota
 */
int
zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit,
    uint64_t amount)
{
	int error = lzc_ddt_prune(zhp->zpool_name, unit, amount);
	if (error != 0) {
		libzfs_handle_t *hdl = zhp->zpool_hdl;
		char errbuf[ERRBUFLEN];

		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot prune dedup table on '%s'"), zhp->zpool_name);

		if (error == EALREADY) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "a prune operation is already in progress"));
			(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		return (-1);
	}

	return (0);
}

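/*
 * Illustrative sketch (not in the original source): asking the DDT pruner
 * to drop unique entries older than one day. The unit enum value is an
 * assumption based on the zpool ddtprune command.
 *
 *	(void) zpool_ddt_prune(zhp, ZPOOL_DDT_PRUNE_AGE, 86400);
 */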