1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22
23 /*
24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
28 * Copyright (c) 2018 Datto Inc.
29 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
30 * Copyright (c) 2017, Intel Corporation.
31 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
32 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
33 * Copyright (c) 2021, 2023, Klara Inc.
34 * Copyright (c) 2025 Hewlett Packard Enterprise Development LP.
35 */
36
37 #include <errno.h>
38 #include <libintl.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <strings.h>
42 #include <unistd.h>
43 #include <libgen.h>
44 #include <zone.h>
45 #include <sys/stat.h>
46 #include <sys/efi_partition.h>
47 #include <sys/systeminfo.h>
48 #include <sys/zfs_ioctl.h>
49 #include <sys/zfs_sysfs.h>
50 #include <sys/vdev_disk.h>
51 #include <sys/types.h>
52 #include <dlfcn.h>
53 #include <libzutil.h>
54 #include <fcntl.h>
55
56 #include "zfs_namecheck.h"
57 #include "zfs_prop.h"
58 #include "libzfs_impl.h"
59 #include "zfs_comutil.h"
60 #include "zfeature_common.h"
61
62 static boolean_t zpool_vdev_is_interior(const char *name);
63
64 typedef struct prop_flags {
65 unsigned int create:1; /* Validate property on creation */
66 unsigned int import:1; /* Validate property on import */
67 unsigned int vdevprop:1; /* Validate property as a VDEV property */
68 } prop_flags_t;
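/*
 * A minimal initialization sketch (mirrors the usage in zpool_create()
 * below; illustrative only):
 *
 *     prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
 */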
69
70 /*
71 * ====================================================================
72 * zpool property functions
73 * ====================================================================
74 */
75
76 static int
77 zpool_get_all_props(zpool_handle_t *zhp)
78 {
79 zfs_cmd_t zc = {"\0"};
80 libzfs_handle_t *hdl = zhp->zpool_hdl;
81
82 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
83
84 if (zhp->zpool_n_propnames > 0) {
85 nvlist_t *innvl = fnvlist_alloc();
86 fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
87 zhp->zpool_propnames, zhp->zpool_n_propnames);
88 zcmd_write_src_nvlist(hdl, &zc, innvl);
89 fnvlist_free(innvl);
90 }
91
92 zcmd_alloc_dst_nvlist(hdl, &zc, 0);
93
94 while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
95 if (errno == ENOMEM)
96 zcmd_expand_dst_nvlist(hdl, &zc);
97 else {
98 zcmd_free_nvlists(&zc);
99 return (-1);
100 }
101 }
102
103 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
104 zcmd_free_nvlists(&zc);
105 return (-1);
106 }
107
108 zcmd_free_nvlists(&zc);
109
110 return (0);
111 }
112
113 int
114 zpool_props_refresh(zpool_handle_t *zhp)
115 {
116 nvlist_t *old_props;
117
118 old_props = zhp->zpool_props;
119
120 if (zpool_get_all_props(zhp) != 0)
121 return (-1);
122
123 nvlist_free(old_props);
124 return (0);
125 }
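/*
 * Illustrative sketch (pool name hypothetical): re-read the cached
 * property nvlist after the pool may have changed out from under us.
 *
 *     zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *     if (zhp != NULL) {
 *         if (zpool_props_refresh(zhp) != 0)
 *             (void) fprintf(stderr, "property refresh failed\n");
 *         zpool_close(zhp);
 *     }
 */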
126
127 static const char *
128 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
129 zprop_source_t *src)
130 {
131 nvlist_t *nv, *nvl;
132 const char *value;
133 zprop_source_t source;
134
135 nvl = zhp->zpool_props;
136 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
137 source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
138 value = fnvlist_lookup_string(nv, ZPROP_VALUE);
139 } else {
140 source = ZPROP_SRC_DEFAULT;
141 if ((value = zpool_prop_default_string(prop)) == NULL)
142 value = "-";
143 }
144
145 if (src)
146 *src = source;
147
148 return (value);
149 }
150
151 uint64_t
152 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
153 {
154 nvlist_t *nv, *nvl;
155 uint64_t value;
156 zprop_source_t source;
157
158 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
159 /*
160 * zpool_get_all_props() has most likely failed because
161 * the pool is faulted, but if all we need is the top level
162 * vdev's guid then get it from the zhp config nvlist.
163 */
164 if ((prop == ZPOOL_PROP_GUID) &&
165 (nvlist_lookup_nvlist(zhp->zpool_config,
166 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
167 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
168 == 0)) {
169 return (value);
170 }
171 return (zpool_prop_default_numeric(prop));
172 }
173
174 nvl = zhp->zpool_props;
175 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
176 source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
177 value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
178 } else {
179 source = ZPROP_SRC_DEFAULT;
180 value = zpool_prop_default_numeric(prop);
181 }
182
183 if (src)
184 *src = source;
185
186 return (value);
187 }
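/*
 * Illustrative sketch, assuming an open handle 'zhp' (this is the same
 * pattern zpool_set_prop() below uses to fetch the pool version):
 *
 *     uint64_t version =
 *         zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
 */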
188
189 /*
190 * Map VDEV STATE to printed strings.
191 */
192 const char *
193 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
194 {
195 switch (state) {
196 case VDEV_STATE_CLOSED:
197 case VDEV_STATE_OFFLINE:
198 return (gettext("OFFLINE"));
199 case VDEV_STATE_REMOVED:
200 return (gettext("REMOVED"));
201 case VDEV_STATE_CANT_OPEN:
202 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
203 return (gettext("FAULTED"));
204 else if (aux == VDEV_AUX_SPLIT_POOL)
205 return (gettext("SPLIT"));
206 else
207 return (gettext("UNAVAIL"));
208 case VDEV_STATE_FAULTED:
209 return (gettext("FAULTED"));
210 case VDEV_STATE_DEGRADED:
211 return (gettext("DEGRADED"));
212 case VDEV_STATE_HEALTHY:
213 return (gettext("ONLINE"));
214
215 default:
216 break;
217 }
218
219 return (gettext("UNKNOWN"));
220 }
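/*
 * For example (illustrative), a healthy vdev maps to "ONLINE":
 *
 *     const char *s =
 *         zpool_state_to_name(VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
 */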
221
222 /*
223 * Map POOL STATE to printed strings.
224 */
225 const char *
226 zpool_pool_state_to_name(pool_state_t state)
227 {
228 switch (state) {
229 default:
230 break;
231 case POOL_STATE_ACTIVE:
232 return (gettext("ACTIVE"));
233 case POOL_STATE_EXPORTED:
234 return (gettext("EXPORTED"));
235 case POOL_STATE_DESTROYED:
236 return (gettext("DESTROYED"));
237 case POOL_STATE_SPARE:
238 return (gettext("SPARE"));
239 case POOL_STATE_L2CACHE:
240 return (gettext("L2CACHE"));
241 case POOL_STATE_UNINITIALIZED:
242 return (gettext("UNINITIALIZED"));
243 case POOL_STATE_UNAVAIL:
244 return (gettext("UNAVAIL"));
245 case POOL_STATE_POTENTIALLY_ACTIVE:
246 return (gettext("POTENTIALLY_ACTIVE"));
247 }
248
249 return (gettext("UNKNOWN"));
250 }
251
252 /*
253 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
254 * "SUSPENDED", etc).
255 */
256 const char *
257 zpool_get_state_str(zpool_handle_t *zhp)
258 {
259 zpool_errata_t errata;
260 zpool_status_t status;
261 const char *str;
262
263 status = zpool_get_status(zhp, NULL, &errata);
264
265 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
266 str = gettext("FAULTED");
267 } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
268 status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
269 status == ZPOOL_STATUS_IO_FAILURE_MMP) {
270 str = gettext("SUSPENDED");
271 } else {
272 nvlist_t *nvroot = fnvlist_lookup_nvlist(
273 zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
274 uint_t vsc;
275 vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
276 nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
277 str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
278 }
279 return (str);
280 }
281
282 /*
283 * Get a zpool property value for 'prop' and return the value in
284 * a pre-allocated buffer.
285 */
286 int
287 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
288 size_t len, zprop_source_t *srctype, boolean_t literal)
289 {
290 uint64_t intval;
291 const char *strval;
292 zprop_source_t src = ZPROP_SRC_NONE;
293
294 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
295 switch (prop) {
296 case ZPOOL_PROP_NAME:
297 (void) strlcpy(buf, zpool_get_name(zhp), len);
298 break;
299
300 case ZPOOL_PROP_HEALTH:
301 (void) strlcpy(buf, zpool_get_state_str(zhp), len);
302 break;
303
304 case ZPOOL_PROP_GUID:
305 intval = zpool_get_prop_int(zhp, prop, &src);
306 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
307 break;
308
309 case ZPOOL_PROP_ALTROOT:
310 case ZPOOL_PROP_CACHEFILE:
311 case ZPOOL_PROP_COMMENT:
312 case ZPOOL_PROP_COMPATIBILITY:
313 if (zhp->zpool_props != NULL ||
314 zpool_get_all_props(zhp) == 0) {
315 (void) strlcpy(buf,
316 zpool_get_prop_string(zhp, prop, &src),
317 len);
318 break;
319 }
320 zfs_fallthrough;
321 default:
322 (void) strlcpy(buf, "-", len);
323 break;
324 }
325
326 if (srctype != NULL)
327 *srctype = src;
328 return (0);
329 }
330
331 /*
332 * ZPOOL_PROP_DEDUPCACHED can be fetched by name only using
333 * the ZPOOL_GET_PROPS_NAMES mechanism
334 */
335 if (prop == ZPOOL_PROP_DEDUPCACHED) {
336 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
337 (void) zpool_props_refresh(zhp);
338 }
339
340 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
341 prop != ZPOOL_PROP_NAME)
342 return (-1);
343
344 switch (zpool_prop_get_type(prop)) {
345 case PROP_TYPE_STRING:
346 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
347 len);
348 break;
349
350 case PROP_TYPE_NUMBER:
351 intval = zpool_get_prop_int(zhp, prop, &src);
352
353 switch (prop) {
354 case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
355 /*
356 * If dedup quota is 0, we translate this into 'none'
357 * (unless literal is set). And if it is UINT64_MAX
358 * we translate that as 'auto' (limit to the size of
359 * the dedicated dedup VDEV). Otherwise, fall through
360 * into the regular number formatting.
361 */
362 if (intval == 0) {
363 (void) strlcpy(buf, literal ? "0" : "none",
364 len);
365 break;
366 } else if (intval == UINT64_MAX) {
367 (void) strlcpy(buf, "auto", len);
368 break;
369 }
370 zfs_fallthrough;
371
372 case ZPOOL_PROP_SIZE:
373 case ZPOOL_PROP_ALLOCATED:
374 case ZPOOL_PROP_FREE:
375 case ZPOOL_PROP_FREEING:
376 case ZPOOL_PROP_LEAKED:
377 case ZPOOL_PROP_ASHIFT:
378 case ZPOOL_PROP_MAXBLOCKSIZE:
379 case ZPOOL_PROP_MAXDNODESIZE:
380 case ZPOOL_PROP_BCLONESAVED:
381 case ZPOOL_PROP_BCLONEUSED:
382 case ZPOOL_PROP_DEDUP_TABLE_SIZE:
383 case ZPOOL_PROP_DEDUPCACHED:
384 if (literal)
385 (void) snprintf(buf, len, "%llu",
386 (u_longlong_t)intval);
387 else
388 (void) zfs_nicenum(intval, buf, len);
389 break;
390
391 case ZPOOL_PROP_EXPANDSZ:
392 case ZPOOL_PROP_CHECKPOINT:
393 if (intval == 0) {
394 (void) strlcpy(buf, "-", len);
395 } else if (literal) {
396 (void) snprintf(buf, len, "%llu",
397 (u_longlong_t)intval);
398 } else {
399 (void) zfs_nicebytes(intval, buf, len);
400 }
401 break;
402
403 case ZPOOL_PROP_CAPACITY:
404 if (literal) {
405 (void) snprintf(buf, len, "%llu",
406 (u_longlong_t)intval);
407 } else {
408 (void) snprintf(buf, len, "%llu%%",
409 (u_longlong_t)intval);
410 }
411 break;
412
413 case ZPOOL_PROP_FRAGMENTATION:
414 if (intval == UINT64_MAX) {
415 (void) strlcpy(buf, "-", len);
416 } else if (literal) {
417 (void) snprintf(buf, len, "%llu",
418 (u_longlong_t)intval);
419 } else {
420 (void) snprintf(buf, len, "%llu%%",
421 (u_longlong_t)intval);
422 }
423 break;
424
425 case ZPOOL_PROP_BCLONERATIO:
426 case ZPOOL_PROP_DEDUPRATIO:
427 if (literal)
428 (void) snprintf(buf, len, "%llu.%02llu",
429 (u_longlong_t)(intval / 100),
430 (u_longlong_t)(intval % 100));
431 else
432 (void) snprintf(buf, len, "%llu.%02llux",
433 (u_longlong_t)(intval / 100),
434 (u_longlong_t)(intval % 100));
435 break;
436
437 case ZPOOL_PROP_HEALTH:
438 (void) strlcpy(buf, zpool_get_state_str(zhp), len);
439 break;
440 case ZPOOL_PROP_VERSION:
441 if (intval >= SPA_VERSION_FEATURES) {
442 (void) snprintf(buf, len, "-");
443 break;
444 }
445 zfs_fallthrough;
446 default:
447 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
448 }
449 break;
450
451 case PROP_TYPE_INDEX:
452 intval = zpool_get_prop_int(zhp, prop, &src);
453 if (zpool_prop_index_to_string(prop, intval, &strval)
454 != 0)
455 return (-1);
456 (void) strlcpy(buf, strval, len);
457 break;
458
459 default:
460 abort();
461 }
462
463 if (srctype)
464 *srctype = src;
465
466 return (0);
467 }
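/*
 * Illustrative sketch, assuming an open handle 'zhp': fetch the pool
 * health string into a caller-supplied buffer.
 *
 *     char buf[ZFS_MAXPROPLEN];
 *     zprop_source_t src;
 *     if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *         &src, B_FALSE) == 0)
 *         (void) printf("health: %s\n", buf);
 */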
468
469 /*
470 * Get a zpool property value for 'propname' and return the value in
471 * a pre-allocated buffer.
472 */
473 int
474 zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
475 size_t len, zprop_source_t *srctype)
476 {
477 nvlist_t *nv;
478 uint64_t ival;
479 const char *value;
480 zprop_source_t source = ZPROP_SRC_LOCAL;
481
482 if (zhp->zpool_props == NULL)
483 zpool_get_all_props(zhp);
484
485 if (nvlist_lookup_nvlist(zhp->zpool_props, propname, &nv) == 0) {
486 if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
487 source = ival;
488 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
489 } else {
490 source = ZPROP_SRC_DEFAULT;
491 value = "-";
492 }
493
494 if (srctype)
495 *srctype = source;
496
497 (void) strlcpy(buf, value, len);
498
499 return (0);
500 }
501
502 /*
503 * Check that the bootfs name belongs to the pool it is being set on.
504 * Assumes bootfs is a valid dataset name.
505 */
506 static boolean_t
507 bootfs_name_valid(const char *pool, const char *bootfs)
508 {
509 int len = strlen(pool);
510 if (bootfs[0] == '\0')
511 return (B_TRUE);
512
513 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
514 return (B_FALSE);
515
516 if (strncmp(pool, bootfs, len) == 0 &&
517 (bootfs[len] == '/' || bootfs[len] == '\0'))
518 return (B_TRUE);
519
520 return (B_FALSE);
521 }
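/*
 * For example (illustrative): with pool "tank", both "tank" and
 * "tank/boot" are accepted, while "other/boot" is rejected.
 */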
522
523 /*
524 * Given an nvlist of zpool properties to be set, validate that they are
525 * correct, and parse any numeric properties (index, boolean, etc) if they are
526 * specified as strings.
527 */
528 static nvlist_t *
529 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
530 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
531 {
532 nvpair_t *elem;
533 nvlist_t *retprops;
534 zpool_prop_t prop;
535 const char *strval;
536 uint64_t intval;
537 const char *check;
538 struct stat64 statbuf;
539 zpool_handle_t *zhp;
540 char *parent, *slash;
541 char report[1024];
542
543 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
544 (void) no_memory(hdl);
545 return (NULL);
546 }
547
548 elem = NULL;
549 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
550 const char *propname = nvpair_name(elem);
551
552 if (flags.vdevprop && zpool_prop_vdev(propname)) {
553 vdev_prop_t vprop = vdev_name_to_prop(propname);
554
555 if (vdev_prop_readonly(vprop)) {
556 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
557 "is readonly"), propname);
558 (void) zfs_error(hdl, EZFS_PROPREADONLY,
559 errbuf);
560 goto error;
561 }
562
563 if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
564 retprops, &strval, &intval, errbuf) != 0)
565 goto error;
566
567 continue;
568 } else if (flags.vdevprop && vdev_prop_user(propname)) {
569 if (nvlist_add_nvpair(retprops, elem) != 0) {
570 (void) no_memory(hdl);
571 goto error;
572 }
573 continue;
574 } else if (flags.vdevprop) {
575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 "invalid property: '%s'"), propname);
577 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
578 goto error;
579 }
580
581 prop = zpool_name_to_prop(propname);
582 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
583 int err;
584 char *fname = strchr(propname, '@') + 1;
585
586 err = zfeature_lookup_name(fname, NULL);
587 if (err != 0) {
588 ASSERT3U(err, ==, ENOENT);
589 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 "feature '%s' unsupported by kernel"),
591 fname);
592 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
593 goto error;
594 }
595
596 if (nvpair_type(elem) != DATA_TYPE_STRING) {
597 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
598 "'%s' must be a string"), propname);
599 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
600 goto error;
601 }
602
603 (void) nvpair_value_string(elem, &strval);
604 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
605 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
606 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
607 "property '%s' can only be set to "
608 "'enabled' or 'disabled'"), propname);
609 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
610 goto error;
611 }
612
613 if (!flags.create &&
614 strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
615 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
616 "property '%s' can only be set to "
617 "'disabled' at creation time"), propname);
618 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
619 goto error;
620 }
621
622 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
623 (void) no_memory(hdl);
624 goto error;
625 }
626 continue;
627 } else if (prop == ZPOOL_PROP_INVAL &&
628 zfs_prop_user(propname)) {
629 /*
630 * This is a user property: make sure it's a
631 * string, and that its name is shorter than ZAP_MAXNAMELEN.
632 */
633 if (nvpair_type(elem) != DATA_TYPE_STRING) {
634 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
635 "'%s' must be a string"), propname);
636 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
637 goto error;
638 }
639
640 if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
641 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
642 "property name '%s' is too long"),
643 propname);
644 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
645 goto error;
646 }
647
648 (void) nvpair_value_string(elem, &strval);
649
650 if (strlen(strval) >= ZFS_MAXPROPLEN) {
651 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
652 "property value '%s' is too long"),
653 strval);
654 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
655 goto error;
656 }
657
658 if (nvlist_add_string(retprops, propname,
659 strval) != 0) {
660 (void) no_memory(hdl);
661 goto error;
662 }
663
664 continue;
665 }
666
667 /*
668 * Make sure this property is valid and applies to this type.
669 */
670 if (prop == ZPOOL_PROP_INVAL) {
671 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
672 "invalid property '%s'"), propname);
673 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
674 goto error;
675 }
676
677 if (zpool_prop_readonly(prop)) {
678 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
679 "is readonly"), propname);
680 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
681 goto error;
682 }
683
684 if (!flags.create && zpool_prop_setonce(prop)) {
685 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
686 "property '%s' can only be set at "
687 "creation time"), propname);
688 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
689 goto error;
690 }
691
692 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
693 &strval, &intval, errbuf) != 0)
694 goto error;
695
696 /*
697 * Perform additional checking for specific properties.
698 */
699 switch (prop) {
700 case ZPOOL_PROP_VERSION:
701 if (intval < version ||
702 !SPA_VERSION_IS_SUPPORTED(intval)) {
703 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
704 "property '%s' number %llu is invalid."),
705 propname, (unsigned long long)intval);
706 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
707 goto error;
708 }
709 break;
710
711 case ZPOOL_PROP_ASHIFT:
712 if (intval != 0 &&
713 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
714 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
715 "property '%s' number %llu is invalid, "
716 "only values between %" PRId32 " and %"
717 PRId32 " are allowed."),
718 propname, (unsigned long long)intval,
719 ASHIFT_MIN, ASHIFT_MAX);
720 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
721 goto error;
722 }
723 break;
724
725 case ZPOOL_PROP_BOOTFS:
726 if (flags.create || flags.import) {
727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
728 "property '%s' cannot be set at creation "
729 "or import time"), propname);
730 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
731 goto error;
732 }
733
734 if (version < SPA_VERSION_BOOTFS) {
735 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
736 "pool must be upgraded to support "
737 "'%s' property"), propname);
738 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
739 goto error;
740 }
741
742 /*
743 * bootfs property value has to be a dataset name and
744 * the dataset has to be in the same pool it is set on.
745 */
746 if (!bootfs_name_valid(poolname, strval)) {
747 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
748 "is an invalid name"), strval);
749 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
750 goto error;
751 }
752
753 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
754 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
755 "could not open pool '%s'"), poolname);
756 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
757 goto error;
758 }
759 zpool_close(zhp);
760 break;
761
762 case ZPOOL_PROP_ALTROOT:
763 if (!flags.create && !flags.import) {
764 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
765 "property '%s' can only be set during pool "
766 "creation or import"), propname);
767 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
768 goto error;
769 }
770
771 if (strval[0] != '/') {
772 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
773 "bad alternate root '%s'"), strval);
774 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
775 goto error;
776 }
777 break;
778
779 case ZPOOL_PROP_CACHEFILE:
780 if (strval[0] == '\0')
781 break;
782
783 if (strcmp(strval, "none") == 0)
784 break;
785
786 if (strval[0] != '/') {
787 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
788 "property '%s' must be empty, an "
789 "absolute path, or 'none'"), propname);
790 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
791 goto error;
792 }
793
794 parent = strdup(strval);
795 if (parent == NULL) {
796 (void) zfs_error(hdl, EZFS_NOMEM, errbuf);
797 goto error;
798 }
799 slash = strrchr(parent, '/');
800
801 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
802 strcmp(slash, "/..") == 0) {
803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
804 "'%s' is not a valid file"), parent);
805 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
806 free(parent);
807 goto error;
808 }
809
810 *slash = '\0';
811
812 if (parent[0] != '\0' &&
813 (stat64(parent, &statbuf) != 0 ||
814 !S_ISDIR(statbuf.st_mode))) {
815 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
816 "'%s' is not a valid directory"),
817 parent);
818 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
819 free(parent);
820 goto error;
821 }
822 free(parent);
823
824 break;
825
826 case ZPOOL_PROP_COMPATIBILITY:
827 switch (zpool_load_compat(strval, NULL, report, 1024)) {
828 case ZPOOL_COMPATIBILITY_OK:
829 case ZPOOL_COMPATIBILITY_WARNTOKEN:
830 break;
831 case ZPOOL_COMPATIBILITY_BADFILE:
832 case ZPOOL_COMPATIBILITY_BADTOKEN:
833 case ZPOOL_COMPATIBILITY_NOFILES:
834 zfs_error_aux(hdl, "%s", report);
835 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
836 goto error;
837 }
838 break;
839
840 case ZPOOL_PROP_COMMENT:
841 for (check = strval; *check != '\0'; check++) {
842 if (!isprint(*check)) {
843 zfs_error_aux(hdl,
844 dgettext(TEXT_DOMAIN,
845 "comment may only have printable "
846 "characters"));
847 (void) zfs_error(hdl, EZFS_BADPROP,
848 errbuf);
849 goto error;
850 }
851 }
852 if (strlen(strval) > ZPROP_MAX_COMMENT) {
853 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
854 "comment must not exceed %d characters"),
855 ZPROP_MAX_COMMENT);
856 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
857 goto error;
858 }
859 break;
860 case ZPOOL_PROP_READONLY:
861 if (!flags.import) {
862 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
863 "property '%s' can only be set at "
864 "import time"), propname);
865 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
866 goto error;
867 }
868 break;
869 case ZPOOL_PROP_MULTIHOST:
870 if (get_system_hostid() == 0) {
871 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
872 "requires a non-zero system hostid"));
873 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
874 goto error;
875 }
876 break;
877 case ZPOOL_PROP_DEDUPDITTO:
878 printf("Note: property '%s' no longer has "
879 "any effect\n", propname);
880 break;
881
882 default:
883 break;
884 }
885 }
886
887 return (retprops);
888 error:
889 nvlist_free(retprops);
890 return (NULL);
891 }
892
893 /*
894 * Set zpool property: propname=propval.
895 */
896 int
897 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
898 {
899 zfs_cmd_t zc = {"\0"};
900 int ret;
901 char errbuf[ERRBUFLEN];
902 nvlist_t *nvl = NULL;
903 nvlist_t *realprops;
904 uint64_t version;
905 prop_flags_t flags = { 0 };
906
907 (void) snprintf(errbuf, sizeof (errbuf),
908 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
909 zhp->zpool_name);
910
911 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
912 return (no_memory(zhp->zpool_hdl));
913
914 if (nvlist_add_string(nvl, propname, propval) != 0) {
915 nvlist_free(nvl);
916 return (no_memory(zhp->zpool_hdl));
917 }
918
919 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
920 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
921 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
922 nvlist_free(nvl);
923 return (-1);
924 }
925
926 nvlist_free(nvl);
927 nvl = realprops;
928
929 /*
930 * Execute the corresponding ioctl() to set this property.
931 */
932 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
933
934 zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);
935
936 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
937
938 zcmd_free_nvlists(&zc);
939 nvlist_free(nvl);
940
941 if (ret)
942 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
943 else
944 (void) zpool_props_refresh(zhp);
945
946 return (ret);
947 }
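/*
 * Illustrative sketch (pool name and value hypothetical):
 *
 *     zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *     if (zhp != NULL) {
 *         if (zpool_set_prop(zhp, "comment", "rack 12, shelf 3") != 0)
 *             (void) fprintf(stderr, "%s\n",
 *                 libzfs_error_description(hdl));
 *         zpool_close(zhp);
 *     }
 */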
948
949 int
950 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
951 zfs_type_t type, boolean_t literal)
952 {
953 libzfs_handle_t *hdl = zhp->zpool_hdl;
954 zprop_list_t *entry;
955 char buf[ZFS_MAXPROPLEN];
956 nvlist_t *features = NULL;
957 nvpair_t *nvp;
958 zprop_list_t **last;
959 boolean_t firstexpand = (NULL == *plp);
960 int i;
961
962 if (zprop_expand_list(hdl, plp, type) != 0)
963 return (-1);
964
965 if (type == ZFS_TYPE_VDEV)
966 return (0);
967
968 last = plp;
969 while (*last != NULL)
970 last = &(*last)->pl_next;
971
972 if ((*plp)->pl_all)
973 features = zpool_get_features(zhp);
974
975 if ((*plp)->pl_all && firstexpand) {
976 /* Handle userprops in the all properties case */
977 if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
978 return (-1);
979
980 nvp = NULL;
981 while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
982 NULL) {
983 const char *propname = nvpair_name(nvp);
984
985 if (!zfs_prop_user(propname))
986 continue;
987
988 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
989 entry->pl_prop = ZPROP_USERPROP;
990 entry->pl_user_prop = zfs_strdup(hdl, propname);
991 entry->pl_width = strlen(entry->pl_user_prop);
992 entry->pl_all = B_TRUE;
993
994 *last = entry;
995 last = &entry->pl_next;
996 }
997
998 for (i = 0; i < SPA_FEATURES; i++) {
999 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
1000 entry->pl_prop = ZPROP_USERPROP;
1001 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
1002 spa_feature_table[i].fi_uname);
1003 entry->pl_width = strlen(entry->pl_user_prop);
1004 entry->pl_all = B_TRUE;
1005
1006 *last = entry;
1007 last = &entry->pl_next;
1008 }
1009 }
1010
1011 /* add any unsupported features */
1012 for (nvp = nvlist_next_nvpair(features, NULL);
1013 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
1014 char *propname;
1015 boolean_t found;
1016
1017 if (zfeature_is_supported(nvpair_name(nvp)))
1018 continue;
1019
1020 propname = zfs_asprintf(hdl, "unsupported@%s",
1021 nvpair_name(nvp));
1022
1023 /*
1024 * Before adding the property to the list make sure that no
1025 * other pool already added the same property.
1026 */
1027 found = B_FALSE;
1028 entry = *plp;
1029 while (entry != NULL) {
1030 if (entry->pl_user_prop != NULL &&
1031 strcmp(propname, entry->pl_user_prop) == 0) {
1032 found = B_TRUE;
1033 break;
1034 }
1035 entry = entry->pl_next;
1036 }
1037 if (found) {
1038 free(propname);
1039 continue;
1040 }
1041
1042 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
1043 entry->pl_prop = ZPROP_USERPROP;
1044 entry->pl_user_prop = propname;
1045 entry->pl_width = strlen(entry->pl_user_prop);
1046 entry->pl_all = B_TRUE;
1047
1048 *last = entry;
1049 last = &entry->pl_next;
1050 }
1051
1052 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
1053 if (entry->pl_fixed && !literal)
1054 continue;
1055
1056 if (entry->pl_prop != ZPROP_USERPROP &&
1057 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
1058 NULL, literal) == 0) {
1059 if (strlen(buf) > entry->pl_width)
1060 entry->pl_width = strlen(buf);
1061 } else if (entry->pl_prop == ZPROP_INVAL &&
1062 zfs_prop_user(entry->pl_user_prop) &&
1063 zpool_get_userprop(zhp, entry->pl_user_prop, buf,
1064 sizeof (buf), NULL) == 0) {
1065 if (strlen(buf) > entry->pl_width)
1066 entry->pl_width = strlen(buf);
1067 }
1068 }
1069
1070 return (0);
1071 }
1072
1073 int
1074 vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
1075 zprop_list_t **plp)
1076 {
1077 zprop_list_t *entry;
1078 char buf[ZFS_MAXPROPLEN];
1079 const char *strval = NULL;
1080 int err = 0;
1081 nvpair_t *elem = NULL;
1082 nvlist_t *vprops = NULL;
1083 nvlist_t *propval = NULL;
1084 const char *propname;
1085 vdev_prop_t prop;
1086 zprop_list_t **last;
1087
1088 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
1089 if (entry->pl_fixed)
1090 continue;
1091
1092 if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
1093 entry->pl_user_prop, buf, sizeof (buf), NULL,
1094 B_FALSE) == 0) {
1095 if (strlen(buf) > entry->pl_width)
1096 entry->pl_width = strlen(buf);
1097 }
1098 if (entry->pl_prop == VDEV_PROP_NAME &&
1099 strlen(vdevname) > entry->pl_width)
1100 entry->pl_width = strlen(vdevname);
1101 }
1102
1103 /* Handle the all properties case */
1104 last = plp;
1105 if (*last != NULL && (*last)->pl_all == B_TRUE) {
1106 while (*last != NULL)
1107 last = &(*last)->pl_next;
1108
1109 err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
1110 if (err != 0)
1111 return (err);
1112
1113 while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
1114 propname = nvpair_name(elem);
1115
1116 /* Skip properties that are not user defined */
1117 if ((prop = vdev_name_to_prop(propname)) !=
1118 VDEV_PROP_USERPROP)
1119 continue;
1120
1121 if (nvpair_value_nvlist(elem, &propval) != 0)
1122 continue;
1123
1124 strval = fnvlist_lookup_string(propval, ZPROP_VALUE);
1125
1126 entry = zfs_alloc(zhp->zpool_hdl,
1127 sizeof (zprop_list_t));
1128 entry->pl_prop = prop;
1129 entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
1130 propname);
1131 entry->pl_width = strlen(strval);
1132 entry->pl_all = B_TRUE;
1133 *last = entry;
1134 last = &entry->pl_next;
1135 }
1136 }
1137
1138 return (0);
1139 }
1140
1141 /*
1142 * Get the state for the given feature on the given ZFS pool.
1143 */
1144 int
1145 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
1146 size_t len)
1147 {
1148 uint64_t refcount;
1149 boolean_t found = B_FALSE;
1150 nvlist_t *features = zpool_get_features(zhp);
1151 boolean_t supported;
1152 const char *feature = strchr(propname, '@') + 1;
1153
1154 supported = zpool_prop_feature(propname);
1155 ASSERT(supported || zpool_prop_unsupported(propname));
1156
1157 /*
1158 * Convert from feature name to feature guid. This conversion is
1159 * unnecessary for unsupported@... properties because they already
1160 * use guids.
1161 */
1162 if (supported) {
1163 int ret;
1164 spa_feature_t fid;
1165
1166 ret = zfeature_lookup_name(feature, &fid);
1167 if (ret != 0) {
1168 (void) strlcpy(buf, "-", len);
1169 return (ENOTSUP);
1170 }
1171 feature = spa_feature_table[fid].fi_guid;
1172 }
1173
1174 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
1175 found = B_TRUE;
1176
1177 if (supported) {
1178 if (!found) {
1179 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
1180 } else {
1181 if (refcount == 0)
1182 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
1183 else
1184 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
1185 }
1186 } else {
1187 if (found) {
1188 if (refcount == 0) {
1189 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
1190 } else {
1191 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
1192 }
1193 } else {
1194 (void) strlcpy(buf, "-", len);
1195 return (ENOTSUP);
1196 }
1197 }
1198
1199 return (0);
1200 }
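/*
 * Illustrative sketch, assuming an open handle 'zhp': query the state
 * of a supported feature ("enabled", "disabled", or "active").
 *
 *     char state[64];
 *     if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *         sizeof (state)) == 0)
 *         (void) printf("async_destroy: %s\n", state);
 */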
1201
1202 /*
1203 * Validate the given pool name, optionally reporting an extended error
1204 * message through 'hdl'.
1205 */
1206 boolean_t
1207 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
1208 {
1209 namecheck_err_t why;
1210 char what;
1211 int ret;
1212
1213 ret = pool_namecheck(pool, &why, &what);
1214
1215 /*
1216 * The rules for reserved pool names were extended at a later point.
1217 * But we need to support users with existing pools that may now be
1218 * invalid. So we only check for this expanded set of names during a
1219 * create (or import), and only in userland.
1220 */
1221 if (ret == 0 && !isopen &&
1222 (strncmp(pool, "mirror", 6) == 0 ||
1223 strncmp(pool, "raidz", 5) == 0 ||
1224 strncmp(pool, "draid", 5) == 0 ||
1225 strncmp(pool, "spare", 5) == 0 ||
1226 strcmp(pool, "log") == 0)) {
1227 if (hdl != NULL)
1228 zfs_error_aux(hdl,
1229 dgettext(TEXT_DOMAIN, "name is reserved"));
1230 return (B_FALSE);
1231 }
1232
1233
1234 if (ret != 0) {
1235 if (hdl != NULL) {
1236 switch (why) {
1237 case NAME_ERR_TOOLONG:
1238 zfs_error_aux(hdl,
1239 dgettext(TEXT_DOMAIN, "name is too long"));
1240 break;
1241
1242 case NAME_ERR_INVALCHAR:
1243 zfs_error_aux(hdl,
1244 dgettext(TEXT_DOMAIN, "invalid character "
1245 "'%c' in pool name"), what);
1246 break;
1247
1248 case NAME_ERR_NOLETTER:
1249 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1250 "name must begin with a letter"));
1251 break;
1252
1253 case NAME_ERR_RESERVED:
1254 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1255 "name is reserved"));
1256 break;
1257
1258 case NAME_ERR_DISKLIKE:
1259 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1260 "pool name is reserved"));
1261 break;
1262
1263 case NAME_ERR_LEADING_SLASH:
1264 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1265 "leading slash in name"));
1266 break;
1267
1268 case NAME_ERR_EMPTY_COMPONENT:
1269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1270 "empty component in name"));
1271 break;
1272
1273 case NAME_ERR_TRAILING_SLASH:
1274 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1275 "trailing slash in name"));
1276 break;
1277
1278 case NAME_ERR_MULTIPLE_DELIMITERS:
1279 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1280 "multiple '@' and/or '#' delimiters in "
1281 "name"));
1282 break;
1283
1284 case NAME_ERR_NO_AT:
1285 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1286 "permission set is missing '@'"));
1287 break;
1288
1289 default:
1290 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1291 "(%d) not defined"), why);
1292 break;
1293 }
1294 }
1295 return (B_FALSE);
1296 }
1297
1298 return (B_TRUE);
1299 }
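/*
 * For example (illustrative): "tank1" passes validation, while "mirror"
 * ("name is reserved") and "1tank" ("name must begin with a letter")
 * do not.
 */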
1300
1301 /*
1302 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1303 * state.
1304 */
1305 zpool_handle_t *
1306 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1307 {
1308 zpool_handle_t *zhp;
1309 boolean_t missing;
1310
1311 /*
1312 * Make sure the pool name is valid.
1313 */
1314 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1315 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1316 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1317 pool);
1318 return (NULL);
1319 }
1320
1321 zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
1322
1323 zhp->zpool_hdl = hdl;
1324 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1325
1326 if (zpool_refresh_stats(zhp, &missing) != 0) {
1327 zpool_close(zhp);
1328 return (NULL);
1329 }
1330
1331 if (missing) {
1332 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1333 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1334 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1335 zpool_close(zhp);
1336 return (NULL);
1337 }
1338
1339 return (zhp);
1340 }
1341
1342 /*
1343 * Like the above, but silent on error. Used when iterating over pools (because
1344 * the configuration cache may be out of date).
1345 */
1346 int
1347 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1348 {
1349 zpool_handle_t *zhp;
1350 boolean_t missing;
1351
1352 zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
1353
1354 zhp->zpool_hdl = hdl;
1355 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1356
1357 if (zpool_refresh_stats(zhp, &missing) != 0) {
1358 zpool_close(zhp);
1359 return (-1);
1360 }
1361
1362 if (missing) {
1363 zpool_close(zhp);
1364 *ret = NULL;
1365 return (0);
1366 }
1367
1368 *ret = zhp;
1369 return (0);
1370 }
1371
1372 /*
1373 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1374 * state.
1375 */
1376 zpool_handle_t *
1377 zpool_open(libzfs_handle_t *hdl, const char *pool)
1378 {
1379 zpool_handle_t *zhp;
1380
1381 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1382 return (NULL);
1383
1384 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1385 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1386 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1387 zpool_close(zhp);
1388 return (NULL);
1389 }
1390
1391 return (zhp);
1392 }
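/*
 * Typical open/use/close pairing (illustrative; pool name hypothetical):
 *
 *     zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *     if (zhp != NULL) {
 *         ... use the handle ...
 *         zpool_close(zhp);
 *     }
 */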
1393
1394 /*
1395 * Close the handle. Simply frees the memory associated with the handle.
1396 */
1397 void
1398 zpool_close(zpool_handle_t *zhp)
1399 {
1400 nvlist_free(zhp->zpool_config);
1401 nvlist_free(zhp->zpool_old_config);
1402 nvlist_free(zhp->zpool_props);
1403 free(zhp);
1404 }
1405
1406 /*
1407 * Return the name of the pool.
1408 */
1409 const char *
1410 zpool_get_name(zpool_handle_t *zhp)
1411 {
1412 return (zhp->zpool_name);
1413 }
1414
1415
1416 /*
1417 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1418 */
1419 int
1420 zpool_get_state(zpool_handle_t *zhp)
1421 {
1422 return (zhp->zpool_state);
1423 }
1424
1425 /*
1426 * Check if vdev list contains a dRAID vdev
1427 */
1428 static boolean_t
1429 zpool_has_draid_vdev(nvlist_t *nvroot)
1430 {
1431 nvlist_t **child;
1432 uint_t children;
1433
1434 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1435 &child, &children) == 0) {
1436 for (uint_t c = 0; c < children; c++) {
1437 const char *type;
1438
1439 if (nvlist_lookup_string(child[c],
1440 ZPOOL_CONFIG_TYPE, &type) == 0 &&
1441 strcmp(type, VDEV_TYPE_DRAID) == 0) {
1442 return (B_TRUE);
1443 }
1444 }
1445 }
1446 return (B_FALSE);
1447 }
1448
1449 /*
1450 * Output a dRAID top-level vdev name into the provided buffer.
1451 */
1452 static char *
1453 zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
1454 uint64_t spares, uint64_t children)
1455 {
1456 snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
1457 VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
1458 (u_longlong_t)children, (u_longlong_t)spares);
1459
1460 return (name);
1461 }
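/*
 * For example (illustrative), data=8, parity=2, spares=1, children=11
 * formats as "draid2:8d:11c:1s".
 */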
1462
1463 /*
1464 * Return B_TRUE if the provided name is a dRAID spare name.
1465 */
1466 boolean_t
1467 zpool_is_draid_spare(const char *name)
1468 {
1469 uint64_t spare_id, parity, vdev_id;
1470
1471 if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
1472 (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
1473 (u_longlong_t *)&spare_id) == 3) {
1474 return (B_TRUE);
1475 }
1476
1477 return (B_FALSE);
1478 }
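/*
 * For example (illustrative), "draid1-2-3" is a dRAID spare name
 * (parity 1, top-level vdev id 2, spare id 3), while "sda1" is not.
 */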
1479
1480 /*
1481 * Create the named pool, using the provided vdev list. It is assumed
1482 * that the consumer has already validated the contents of the nvlist, so we
1483 * don't have to worry about error semantics.
1484 */
1485 int
1486 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1487 nvlist_t *props, nvlist_t *fsprops)
1488 {
1489 zfs_cmd_t zc = {"\0"};
1490 nvlist_t *zc_fsprops = NULL;
1491 nvlist_t *zc_props = NULL;
1492 nvlist_t *hidden_args = NULL;
1493 uint8_t *wkeydata = NULL;
1494 uint_t wkeylen = 0;
1495 char errbuf[ERRBUFLEN];
1496 int ret = -1;
1497
1498 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1499 "cannot create '%s'"), pool);
1500
1501 if (!zpool_name_valid(hdl, B_FALSE, pool))
1502 return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
1503
1504 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
1505
1506 if (props) {
1507 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1508
1509 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1510 SPA_VERSION_1, flags, errbuf)) == NULL) {
1511 goto create_failed;
1512 }
1513 }
1514
1515 if (fsprops) {
1516 uint64_t zoned;
1517 const char *zonestr;
1518
1519 zoned = ((nvlist_lookup_string(fsprops,
1520 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1521 strcmp(zonestr, "on") == 0);
1522
1523 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1524 fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
1525 goto create_failed;
1526 }
1527
1528 if (!zc_props &&
1529 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1530 goto create_failed;
1531 }
1532 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
1533 &wkeydata, &wkeylen) != 0) {
1534 zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
1535 goto create_failed;
1536 }
1537 if (nvlist_add_nvlist(zc_props,
1538 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1539 goto create_failed;
1540 }
1541 if (wkeydata != NULL) {
1542 if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
1543 goto create_failed;
1544
1545 if (nvlist_add_uint8_array(hidden_args, "wkeydata",
1546 wkeydata, wkeylen) != 0)
1547 goto create_failed;
1548
1549 if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
1550 hidden_args) != 0)
1551 goto create_failed;
1552 }
1553 }
1554
1555 if (zc_props)
1556 zcmd_write_src_nvlist(hdl, &zc, zc_props);
1557
1558 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1559
1560 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1561
1562 zcmd_free_nvlists(&zc);
1563 nvlist_free(zc_props);
1564 nvlist_free(zc_fsprops);
1565 nvlist_free(hidden_args);
1566 if (wkeydata != NULL)
1567 free(wkeydata);
1568
1569 switch (errno) {
1570 case EBUSY:
1571 /*
1572 * This can happen if the user has specified the same
1573 * device multiple times. We can't reliably detect this
1574 * until we try to add it and see we already have a
1575 * label. This can also happen if the device is
1576 * part of an active md or lvm device.
1577 */
1578 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1579 "one or more vdevs refer to the same device, or "
1580 "one of\nthe devices is part of an active md or "
1581 "lvm device"));
1582 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1583
1584 case ERANGE:
1585 /*
1586 * This happens if the record size is outside the
1587 * allowed size range, or not a power of 2.
1588 *
1589 * NOTE: although zfs_valid_proplist is called earlier,
1590 * this case may have slipped through since the
1591 * pool does not exist yet and it is therefore
1592 * impossible to read properties e.g. max blocksize
1593 * from the pool.
1594 */
1595 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1596 "record size invalid"));
1597 return (zfs_error(hdl, EZFS_BADPROP, errbuf));
1598
1599 case EOVERFLOW:
1600 /*
1601 * This occurs when one of the devices is below
1602 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1603 * device was the problem device since there's no
1604 * reliable way to determine device size from userland.
1605 */
1606 {
1607 char buf[64];
1608
1609 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1610 sizeof (buf));
1611
1612 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1613 "one or more devices is less than the "
1614 "minimum size (%s)"), buf);
1615 }
1616 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1617
1618 case ENOSPC:
1619 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1620 "one or more devices is out of space"));
1621 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1622
1623 case EINVAL:
1624 if (zpool_has_draid_vdev(nvroot) &&
1625 zfeature_lookup_name("draid", NULL) != 0) {
1626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1627 "dRAID vdevs are unsupported by the "
1628 "kernel"));
1629 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1630 } else {
1631 return (zpool_standard_error(hdl, errno,
1632 errbuf));
1633 }
1634
1635 default:
1636 return (zpool_standard_error(hdl, errno, errbuf));
1637 }
1638 }
1639
1640 create_failed:
1641 zcmd_free_nvlists(&zc);
1642 nvlist_free(zc_props);
1643 nvlist_free(zc_fsprops);
1644 nvlist_free(hidden_args);
1645 if (wkeydata != NULL)
1646 free(wkeydata);
1647 return (ret);
1648 }
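/*
 * Illustrative sketch: the root vdev nvlist is normally built and
 * validated by the zpool command (make_root_vdev()) before this call;
 * shown here only in outline.
 *
 *     nvlist_t *nvroot = ...;    (caller-validated vdev tree)
 *     if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *         (void) fprintf(stderr, "%s\n",
 *             libzfs_error_description(hdl));
 */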
1649
1650 /*
1651 * Destroy the given pool. It is up to the caller to ensure that there are no
1652 * datasets left in the pool.
1653 */
1654 int
1655 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1656 {
1657 zfs_cmd_t zc = {"\0"};
1658 zfs_handle_t *zfp = NULL;
1659 libzfs_handle_t *hdl = zhp->zpool_hdl;
1660 char errbuf[ERRBUFLEN];
1661
1662 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1663 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1664 return (-1);
1665
1666 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1667 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1668
1669 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1670 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1671 "cannot destroy '%s'"), zhp->zpool_name);
1672
1673 if (errno == EROFS) {
1674 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1675 "one or more devices is read only"));
1676 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1677 } else {
1678 (void) zpool_standard_error(hdl, errno, errbuf);
1679 }
1680
1681 if (zfp)
1682 zfs_close(zfp);
1683 return (-1);
1684 }
1685
1686 if (zfp) {
1687 remove_mountpoint(zfp);
1688 zfs_close(zfp);
1689 }
1690
1691 return (0);
1692 }
1693
1694 /*
1695 * Create a checkpoint in the given pool.
1696 */
1697 int
1698 zpool_checkpoint(zpool_handle_t *zhp)
1699 {
1700 libzfs_handle_t *hdl = zhp->zpool_hdl;
1701 char errbuf[ERRBUFLEN];
1702 int error;
1703
1704 error = lzc_pool_checkpoint(zhp->zpool_name);
1705 if (error != 0) {
1706 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1707 "cannot checkpoint '%s'"), zhp->zpool_name);
1708 (void) zpool_standard_error(hdl, error, errbuf);
1709 return (-1);
1710 }
1711
1712 return (0);
1713 }
1714
1715 /*
1716 * Discard the checkpoint from the given pool.
1717 */
1718 int
1719 zpool_discard_checkpoint(zpool_handle_t *zhp)
1720 {
1721 libzfs_handle_t *hdl = zhp->zpool_hdl;
1722 char errbuf[ERRBUFLEN];
1723 int error;
1724
1725 error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1726 if (error != 0) {
1727 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1728 "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1729 (void) zpool_standard_error(hdl, error, errbuf);
1730 return (-1);
1731 }
1732
1733 return (0);
1734 }
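/*
 * Illustrative pairing, assuming an open handle 'zhp': take a
 * checkpoint, and discard it once the pool state is acceptable.
 *
 *     if (zpool_checkpoint(zhp) == 0) {
 *         ... verify the pool ...
 *         (void) zpool_discard_checkpoint(zhp);
 *     }
 */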
1735
1736 /*
1737 * Prefetch data of the given type (DDT or BRT) for the given pool.
1738 */
1739 int
1740 zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type)
1741 {
1742 libzfs_handle_t *hdl = zhp->zpool_hdl;
1743 char msg[1024];
1744 int error;
1745
1746 error = lzc_pool_prefetch(zhp->zpool_name, type);
1747 if (error != 0) {
1748 const char *typename = "unknown";
1749 if (type == ZPOOL_PREFETCH_DDT)
1750 typename = "ddt";
1751 else if (type == ZPOOL_PREFETCH_BRT)
1752 typename = "brt";
1753 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1754 "cannot prefetch %s in '%s'"), typename, zhp->zpool_name);
1755 (void) zpool_standard_error(hdl, error, msg);
1756 return (-1);
1757 }
1758
1759 return (0);
1760 }
1761
1762 /*
1763 * Add the given vdevs to the pool. The caller must have already performed the
1764 * necessary verification to ensure that the vdev specification is well-formed.
1765 */
1766 int
1767 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)
1768 {
1769 zfs_cmd_t zc = {"\0"};
1770 int ret;
1771 libzfs_handle_t *hdl = zhp->zpool_hdl;
1772 char errbuf[ERRBUFLEN];
1773 nvlist_t **spares, **l2cache;
1774 uint_t nspares, nl2cache;
1775
1776 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1777 "cannot add to '%s'"), zhp->zpool_name);
1778
1779 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1780 SPA_VERSION_SPARES &&
1781 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1782 &spares, &nspares) == 0) {
1783 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1784 "upgraded to add hot spares"));
1785 return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
1786 }
1787
1788 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1789 SPA_VERSION_L2CACHE &&
1790 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1791 &l2cache, &nl2cache) == 0) {
1792 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1793 "upgraded to add cache devices"));
1794 return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
1795 }
1796
1797 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
1798 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1799 zc.zc_flags = check_ashift;
1800
1801 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1802 switch (errno) {
1803 case EBUSY:
1804 /*
1805 * This can happen if the user has specified the same
1806 * device multiple times. We can't reliably detect this
1807 * until we try to add it and see we already have a
1808 * label.
1809 */
1810 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1811 "one or more vdevs refer to the same device"));
1812 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1813 break;
1814
1815 case EINVAL:
1816
1817 if (zpool_has_draid_vdev(nvroot) &&
1818 zfeature_lookup_name("draid", NULL) != 0) {
1819 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1820 "dRAID vdevs are unsupported by the "
1821 "kernel"));
1822 } else {
1823 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1824 "invalid config; a pool with removing/"
1825 "removed vdevs does not support adding "
1826 "raidz or dRAID vdevs"));
1827 }
1828
1829 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1830 break;
1831
1832 case EOVERFLOW:
1833 /*
1834 * This occurs when one of the devices is below
1835 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1836 * device was the problem device since there's no
1837 * reliable way to determine device size from userland.
1838 */
1839 {
1840 char buf[64];
1841
1842 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1843 sizeof (buf));
1844
1845 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1846 "device is less than the minimum "
1847 "size (%s)"), buf);
1848 }
1849 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1850 break;
1851
1852 case ENOTSUP:
1853 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1854 "pool must be upgraded to add these vdevs"));
1855 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
1856 break;
1857
1858 default:
1859 (void) zpool_standard_error(hdl, errno, errbuf);
1860 }
1861
1862 ret = -1;
1863 } else {
1864 ret = 0;
1865 }
1866
1867 zcmd_free_nvlists(&zc);
1868
1869 return (ret);
1870 }
1871
1872 /*
1873 * Exports the pool from the system. The caller must ensure that there are no
1874 * mounted datasets in the pool.
1875 */
1876 static int
1877 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1878 const char *log_str)
1879 {
1880 zfs_cmd_t zc = {"\0"};
1881
1882 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1883 zc.zc_cookie = force;
1884 zc.zc_guid = hardforce;
1885 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1886
1887 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1888 switch (errno) {
1889 case EXDEV:
1890 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1891 "use '-f' to override the following errors:\n"
1892 "'%s' has an active shared spare which could be"
1893 " used by other pools once '%s' is exported."),
1894 zhp->zpool_name, zhp->zpool_name);
1895 return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1896 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1897 zhp->zpool_name));
1898 default:
1899 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1900 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1901 zhp->zpool_name));
1902 }
1903 }
1904
1905 return (0);
1906 }
1907
1908 int
1909 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1910 {
1911 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1912 }
1913
1914 int
1915 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1916 {
1917 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1918 }
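/*
 * Illustrative sketch, assuming an open handle 'zhp'; the log string is
 * recorded in the pool history.
 *
 *     if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *         (void) fprintf(stderr, "%s\n",
 *             libzfs_error_description(zhp->zpool_hdl));
 */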
1919
1920 static void
1921 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1922 nvlist_t *config)
1923 {
1924 nvlist_t *nv = NULL;
1925 uint64_t rewindto;
1926 int64_t loss = -1;
1927 struct tm t;
1928 char timestr[128];
1929
1930 if (!hdl->libzfs_printerr || config == NULL)
1931 return;
1932
1933 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1934 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1935 return;
1936 }
1937
1938 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1939 return;
1940 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1941
1942 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1943 ctime_r((time_t *)&rewindto, timestr) != NULL) {
1944 timestr[24] = 0;
1945 if (dryrun) {
1946 (void) printf(dgettext(TEXT_DOMAIN,
1947 "Would be able to return %s "
1948 "to its state as of %s.\n"),
1949 name, timestr);
1950 } else {
1951 (void) printf(dgettext(TEXT_DOMAIN,
1952 "Pool %s returned to its state as of %s.\n"),
1953 name, timestr);
1954 }
1955 if (loss > 120) {
1956 (void) printf(dgettext(TEXT_DOMAIN,
1957 "%s approximately %lld "),
1958 dryrun ? "Would discard" : "Discarded",
1959 ((longlong_t)loss + 30) / 60);
1960 (void) printf(dgettext(TEXT_DOMAIN,
1961 "minutes of transactions.\n"));
1962 } else if (loss > 0) {
1963 (void) printf(dgettext(TEXT_DOMAIN,
1964 "%s approximately %lld "),
1965 dryrun ? "Would discard" : "Discarded",
1966 (longlong_t)loss);
1967 (void) printf(dgettext(TEXT_DOMAIN,
1968 "seconds of transactions.\n"));
1969 }
1970 }
1971 }
1972
1973 void
1974 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1975 nvlist_t *config, char *buf, size_t size)
1976 {
1977 nvlist_t *nv = NULL;
1978 int64_t loss = -1;
1979 uint64_t edata = UINT64_MAX;
1980 uint64_t rewindto;
1981 struct tm t;
1982 char timestr[128], temp[1024];
1983
1984 if (!hdl->libzfs_printerr)
1985 return;
1986
1987 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1988 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1989 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1990 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1991 goto no_info;
1992
1993 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1994 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1995 &edata);
1996
1997 (void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
1998 "Recovery is possible, but will result in some data loss.\n"));
1999
2000 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
2001 ctime_r((time_t *)&rewindto, timestr) != NULL) {
2002 timestr[24] = 0;
2003 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2004 "\tReturning the pool to its state as of %s\n"
2005 "\tshould correct the problem. "), timestr);
2006 (void) strlcat(buf, temp, size);
2007 } else {
2008 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2009 "\tReverting the pool to an earlier state "
2010 "should correct the problem.\n\t"), size);
2011 }
2012
2013 if (loss > 120) {
2014 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2015 "Approximately %lld minutes of data\n"
2016 "\tmust be discarded, irreversibly. "),
2017 ((longlong_t)loss + 30) / 60);
2018 (void) strlcat(buf, temp, size);
2019 } else if (loss > 0) {
2020 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2021 "Approximately %lld seconds of data\n"
2022 "\tmust be discarded, irreversibly. "),
2023 (longlong_t)loss);
2024 (void) strlcat(buf, temp, size);
2025 }
2026 if (edata != 0 && edata != UINT64_MAX) {
2027 if (edata == 1) {
2028 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2029 "After rewind, at least\n"
2030 "\tone persistent user-data error will remain. "),
2031 size);
2032 } else {
2033 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2034 "After rewind, several\n"
2035 "\tpersistent user-data errors will remain. "),
2036 size);
2037 }
2038 }
2039 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2040 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
2041 reason >= 0 ? "clear" : "import", name);
2042 (void) strlcat(buf, temp, size);
2043
2044 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2045 "A scrub of the pool\n"
2046 "\tis strongly recommended after recovery.\n"), size);
2047 return;
2048
2049 no_info:
2050 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2051 "Destroy and re-create the pool from\n\ta backup source.\n"), size);
2052 }
2053
2054 /*
2055 * zpool_import() is a contracted interface; its signature should be kept
2056 * the same if possible.
2057 *
2058 * Applications should use zpool_import_props() to import a pool with
2059 * new property values to be set.
2060 */
2061 int
2062 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2063 char *altroot)
2064 {
2065 nvlist_t *props = NULL;
2066 int ret;
2067
2068 if (altroot != NULL) {
2069 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
2070 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2071 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2072 newname));
2073 }
2074
2075 if (nvlist_add_string(props,
2076 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
2077 nvlist_add_string(props,
2078 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
2079 nvlist_free(props);
2080 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2081 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2082 newname));
2083 }
2084 }
2085
2086 ret = zpool_import_props(hdl, config, newname, props,
2087 ZFS_IMPORT_NORMAL);
2088 nvlist_free(props);
2089 return (ret);
2090 }
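
/*
 * Example (sketch): import a discovered pool under an alternate root,
 * keeping its original name. 'config' is assumed to have come from pool
 * discovery (zpool_find_import(), as noted above); the path is
 * illustrative and error handling is elided.
 *
 *	char altroot[] = "/mnt/recovery";
 *	if (zpool_import(hdl, config, NULL, altroot) != 0)
 *		... libzfs_error_description(hdl) describes the failure ...
 */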
2091
2092 static void
2093 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
2094 int indent)
2095 {
2096 nvlist_t **child;
2097 uint_t c, children;
2098 char *vname;
2099 uint64_t is_log = 0;
2100
2101 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
2102 &is_log);
2103
2104 if (name != NULL)
2105 (void) printf("\t%*s%s%s\n", indent, "", name,
2106 is_log ? " [log]" : "");
2107
2108 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2109 &child, &children) != 0)
2110 return;
2111
2112 for (c = 0; c < children; c++) {
2113 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
2114 print_vdev_tree(hdl, vname, child[c], indent + 2);
2115 free(vname);
2116 }
2117 }
2118
2119 void
2120 zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
2121 {
2122 nvlist_t *nvinfo, *unsup_feat;
2123 char temp[512];
2124
2125 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
2126 unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
2127
2128 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
2129 nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
2130 const char *desc = fnvpair_value_string(nvp);
2131 if (strlen(desc) > 0) {
2132 (void) snprintf(temp, 512, "\t%s (%s)\n",
2133 nvpair_name(nvp), desc);
2134 (void) strlcat(buf, temp, size);
2135 } else {
2136 (void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
2137 (void) strlcat(buf, temp, size);
2138 }
2139 }
2140 }
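
/*
 * Example (sketch): format the unsupported-feature list from a failed
 * import into a caller-supplied buffer. 'config' is assumed to carry
 * ZPOOL_CONFIG_LOAD_INFO from the kernel, as in the ENOTSUP path of
 * zpool_import_props() below.
 *
 *	char buf[2048] = { 0 };
 *	zpool_collect_unsup_feat(config, buf, sizeof (buf));
 *	(void) printf("%s", buf);
 */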
2141
2142 /*
2143 * Import the given pool using the known configuration and a list of
2144 * properties to be set. The configuration should have come from
2145 * zpool_find_import(). The 'newname' parameter controls whether the pool
2146 * is imported with a different name.
2147 */
2148 int
2149 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2150 nvlist_t *props, int flags)
2151 {
2152 zfs_cmd_t zc = {"\0"};
2153 zpool_load_policy_t policy;
2154 nvlist_t *nv = NULL;
2155 nvlist_t *nvinfo = NULL;
2156 nvlist_t *missing = NULL;
2157 const char *thename;
2158 const char *origname;
2159 int ret;
2160 int error = 0;
2161 char buf[2048];
2162 char errbuf[ERRBUFLEN];
2163
2164 origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
2165
2166 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2167 "cannot import pool '%s'"), origname);
2168
2169 if (newname != NULL) {
2170 if (!zpool_name_valid(hdl, B_FALSE, newname))
2171 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
2172 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2173 newname));
2174 thename = newname;
2175 } else {
2176 thename = origname;
2177 }
2178
2179 if (props != NULL) {
2180 uint64_t version;
2181 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2182
2183 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
2184
2185 if ((props = zpool_valid_proplist(hdl, origname,
2186 props, version, flags, errbuf)) == NULL)
2187 return (-1);
2188 zcmd_write_src_nvlist(hdl, &zc, props);
2189 nvlist_free(props);
2190 }
2191
2192 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
2193
2194 zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
2195
2196 zcmd_write_conf_nvlist(hdl, &zc, config);
2197 zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
2198
2199 zc.zc_cookie = flags;
2200 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
2201 errno == ENOMEM)
2202 zcmd_expand_dst_nvlist(hdl, &zc);
2203 if (ret != 0)
2204 error = errno;
2205
2206 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
2207
2208 zcmd_free_nvlists(&zc);
2209
2210 zpool_get_load_policy(config, &policy);
2211
2212 if (error) {
2213 char desc[1024];
2214 char aux[256];
2215
2216 /*
2217 * Dry-run failed, but we print out what success
2218 * looks like if we found a best txg
2219 */
2220 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
2221 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2222 B_TRUE, nv);
2223 nvlist_free(nv);
2224 return (-1);
2225 }
2226
2227 if (newname == NULL)
2228 (void) snprintf(desc, sizeof (desc),
2229 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2230 thename);
2231 else
2232 (void) snprintf(desc, sizeof (desc),
2233 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
2234 origname, thename);
2235
2236 switch (error) {
2237 case ENOTSUP:
2238 if (nv != NULL && nvlist_lookup_nvlist(nv,
2239 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2240 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
2241 (void) printf(dgettext(TEXT_DOMAIN, "This "
2242 "pool uses the following feature(s) not "
2243 "supported by this system:\n"));
2244 memset(buf, 0, 2048);
2245 zpool_collect_unsup_feat(nv, buf, 2048);
2246 (void) printf("%s", buf);
2247 if (nvlist_exists(nvinfo,
2248 ZPOOL_CONFIG_CAN_RDONLY)) {
2249 (void) printf(dgettext(TEXT_DOMAIN,
2250 "All unsupported features are only "
2251 "required for writing to the pool."
2252 "\nThe pool can be imported using "
2253 "'-o readonly=on'.\n"));
2254 }
2255 }
2256 /*
2257 * Unsupported version.
2258 */
2259 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
2260 break;
2261
2262 case EREMOTEIO:
2263 if (nv != NULL && nvlist_lookup_nvlist(nv,
2264 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
2265 const char *hostname = "<unknown>";
2266 uint64_t hostid = 0;
2267 mmp_state_t mmp_state;
2268
2269 mmp_state = fnvlist_lookup_uint64(nvinfo,
2270 ZPOOL_CONFIG_MMP_STATE);
2271
2272 if (nvlist_exists(nvinfo,
2273 ZPOOL_CONFIG_MMP_HOSTNAME))
2274 hostname = fnvlist_lookup_string(nvinfo,
2275 ZPOOL_CONFIG_MMP_HOSTNAME);
2276
2277 if (nvlist_exists(nvinfo,
2278 ZPOOL_CONFIG_MMP_HOSTID))
2279 hostid = fnvlist_lookup_uint64(nvinfo,
2280 ZPOOL_CONFIG_MMP_HOSTID);
2281
2282 if (mmp_state == MMP_STATE_ACTIVE) {
2283 (void) snprintf(aux, sizeof (aux),
2284 dgettext(TEXT_DOMAIN, "pool is imp"
2285 "orted on host '%s' (hostid=%lx).\n"
2286 "Export the pool on the other "
2287 "system, then run 'zpool import'."),
2288 hostname, (unsigned long) hostid);
2289 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
2290 (void) snprintf(aux, sizeof (aux),
2291 dgettext(TEXT_DOMAIN, "pool has "
2292 "the multihost property on and "
2293 "the\nsystem's hostid is not set. "
2294 "Set a unique system hostid with "
2295 "the zgenhostid(8) command.\n"));
2296 }
2297
2298 (void) zfs_error_aux(hdl, "%s", aux);
2299 }
2300 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
2301 break;
2302
2303 case EINVAL:
2304 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2305 break;
2306
2307 case EROFS:
2308 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2309 "one or more devices is read only"));
2310 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2311 break;
2312
2313 case ENXIO:
2314 if (nv && nvlist_lookup_nvlist(nv,
2315 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2316 nvlist_lookup_nvlist(nvinfo,
2317 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2318 (void) printf(dgettext(TEXT_DOMAIN,
2319 "The devices below are missing or "
2320 "corrupted, use '-m' to import the pool "
2321 "anyway:\n"));
2322 print_vdev_tree(hdl, NULL, missing, 2);
2323 (void) printf("\n");
2324 }
2325 (void) zpool_standard_error(hdl, error, desc);
2326 break;
2327
2328 case EEXIST:
2329 (void) zpool_standard_error(hdl, error, desc);
2330 break;
2331
2332 case EBUSY:
2333 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2334 "one or more devices are already in use\n"));
2335 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2336 break;
2337 case ENAMETOOLONG:
2338 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2339 "new name of at least one dataset is longer than "
2340 "the maximum allowable length"));
2341 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2342 break;
2343 default:
2344 (void) zpool_standard_error(hdl, error, desc);
2345 memset(buf, 0, 2048);
2346 zpool_explain_recover(hdl,
2347 newname ? origname : thename, -error, nv,
2348 buf, 2048);
2349 (void) printf("\t%s", buf);
2350 break;
2351 }
2352
2353 nvlist_free(nv);
2354 ret = -1;
2355 } else {
2356 zpool_handle_t *zhp;
2357
2358 /*
2359 * This should never fail, but play it safe anyway.
2360 */
2361 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2362 ret = -1;
2363 else if (zhp != NULL)
2364 zpool_close(zhp);
2365 if (policy.zlp_rewind &
2366 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2367 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2368 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2369 }
2370 nvlist_free(nv);
2371 }
2372
2373 return (ret);
2374 }
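
/*
 * Example (sketch): import with an explicit property list and flags.
 * The property and flag choices below are illustrative; error handling
 * is elided.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none");
 *	int ret = zpool_import_props(hdl, config, NULL, props,
 *	    ZFS_IMPORT_NORMAL);
 *	fnvlist_free(props);
 */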
2375
2376 /*
2377 * Translate vdev names to guids. If a vdev_path is determined to be
2378 * unsuitable then a vd_errlist is allocated and the vdev path and errno
2379 * are added to it.
2380 */
2381 static int
2382 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2383 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2384 {
2385 nvlist_t *errlist = NULL;
2386 int error = 0;
2387
2388 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2389 elem = nvlist_next_nvpair(vds, elem)) {
2390 boolean_t spare, cache;
2391
2392 const char *vd_path = nvpair_name(elem);
2393 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2394 NULL);
2395
2396 if ((tgt == NULL) || cache || spare) {
2397 if (errlist == NULL) {
2398 errlist = fnvlist_alloc();
2399 error = EINVAL;
2400 }
2401
2402 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2403 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2404 fnvlist_add_int64(errlist, vd_path, err);
2405 continue;
2406 }
2407
2408 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2409 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2410
2411 char msg[MAXNAMELEN];
2412 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2413 fnvlist_add_string(guids_to_paths, msg, vd_path);
2414 }
2415
2416 if (error != 0) {
2417 verify(errlist != NULL);
2418 if (vd_errlist != NULL)
2419 *vd_errlist = errlist;
2420 else
2421 fnvlist_free(errlist);
2422 }
2423
2424 return (error);
2425 }
2426
2427 static int
2428 xlate_init_err(int err)
2429 {
2430 switch (err) {
2431 case ENODEV:
2432 return (EZFS_NODEVICE);
2433 case EINVAL:
2434 case EROFS:
2435 return (EZFS_BADDEV);
2436 case EBUSY:
2437 return (EZFS_INITIALIZING);
2438 case ESRCH:
2439 return (EZFS_NO_INITIALIZE);
2440 }
2441 return (err);
2442 }
2443
2444 int
2445 zpool_initialize_one(zpool_handle_t *zhp, void *data)
2446 {
2447 int error;
2448 libzfs_handle_t *hdl = zpool_get_handle(zhp);
2449 const char *pool_name = zpool_get_name(zhp);
2450 if (zpool_open_silent(hdl, pool_name, &zhp) != 0)
2451 return (-1);
2452 initialize_cbdata_t *cb = data;
2453 nvlist_t *vdevs = fnvlist_alloc();
2454
2455 nvlist_t *config = zpool_get_config(zhp, NULL);
2456 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
2457 ZPOOL_CONFIG_VDEV_TREE);
2458 zpool_collect_leaves(zhp, nvroot, vdevs);
2459 if (cb->wait)
2460 error = zpool_initialize_wait(zhp, cb->cmd_type, vdevs);
2461 else
2462 error = zpool_initialize(zhp, cb->cmd_type, vdevs);
2463 fnvlist_free(vdevs);
2464
2465 return (error);
2466 }
2467
2468 /*
2469 * Begin, suspend, cancel, or uninit (clear) the initialization (initializing
2470 * of all free blocks) for the given vdevs in the given pool.
2471 */
2472 static int
2473 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2474 nvlist_t *vds, boolean_t wait)
2475 {
2476 int err;
2477
2478 nvlist_t *vdev_guids = fnvlist_alloc();
2479 nvlist_t *guids_to_paths = fnvlist_alloc();
2480 nvlist_t *vd_errlist = NULL;
2481 nvlist_t *errlist;
2482 nvpair_t *elem;
2483
2484 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2485 guids_to_paths, &vd_errlist);
2486
2487 if (err != 0) {
2488 verify(vd_errlist != NULL);
2489 goto list_errors;
2490 }
2491
2492 err = lzc_initialize(zhp->zpool_name, cmd_type,
2493 vdev_guids, &errlist);
2494
2495 if (err != 0) {
2496 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2497 ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
2498 goto list_errors;
2499 }
2500
2501 if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
2502 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2503 "uninitialize is not supported by kernel"));
2504 }
2505
2506 (void) zpool_standard_error(zhp->zpool_hdl, err,
2507 dgettext(TEXT_DOMAIN, "operation failed"));
2508 goto out;
2509 }
2510
2511 if (wait) {
2512 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2513 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2514
2515 uint64_t guid = fnvpair_value_uint64(elem);
2516
2517 err = lzc_wait_tag(zhp->zpool_name,
2518 ZPOOL_WAIT_INITIALIZE, guid, NULL);
2519 if (err != 0) {
2520 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2521 err, dgettext(TEXT_DOMAIN, "error "
2522 "waiting for '%s' to initialize"),
2523 nvpair_name(elem));
2524
2525 goto out;
2526 }
2527 }
2528 }
2529 goto out;
2530
2531 list_errors:
2532 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2533 elem = nvlist_next_nvpair(vd_errlist, elem)) {
2534 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2535 const char *path;
2536
2537 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2538 &path) != 0)
2539 path = nvpair_name(elem);
2540
2541 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2542 "cannot initialize '%s'", path);
2543 }
2544
2545 out:
2546 fnvlist_free(vdev_guids);
2547 fnvlist_free(guids_to_paths);
2548
2549 if (vd_errlist != NULL)
2550 fnvlist_free(vd_errlist);
2551
2552 return (err == 0 ? 0 : -1);
2553 }
2554
2555 int
2556 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2557 nvlist_t *vds)
2558 {
2559 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2560 }
2561
2562 int
2563 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2564 nvlist_t *vds)
2565 {
2566 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2567 }
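
/*
 * Example (sketch): start initialization on every leaf vdev of a pool
 * and block until it completes, mirroring zpool_initialize_one() above;
 * error handling is elided.
 *
 *	nvlist_t *vdevs = fnvlist_alloc();
 *	nvlist_t *nvroot = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
 *	    ZPOOL_CONFIG_VDEV_TREE);
 *	zpool_collect_leaves(zhp, nvroot, vdevs);
 *	(void) zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vdevs);
 *	fnvlist_free(vdevs);
 */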
2568
2569 static int
2570 xlate_trim_err(int err)
2571 {
2572 switch (err) {
2573 case ENODEV:
2574 return (EZFS_NODEVICE);
2575 case EINVAL:
2576 case EROFS:
2577 return (EZFS_BADDEV);
2578 case EBUSY:
2579 return (EZFS_TRIMMING);
2580 case ESRCH:
2581 return (EZFS_NO_TRIM);
2582 case EOPNOTSUPP:
2583 return (EZFS_TRIM_NOTSUP);
2584 }
2585 return (err);
2586 }
2587
2588 void
2589 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
2590 {
2591 libzfs_handle_t *hdl = zhp->zpool_hdl;
2592 uint_t children = 0;
2593 nvlist_t **child;
2594 uint_t i;
2595
2596 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2597 &child, &children);
2598
2599 if (children == 0) {
2600 char *path = zpool_vdev_name(hdl, zhp, nvroot,
2601 VDEV_NAME_PATH);
2602
2603 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
2604 strcmp(path, VDEV_TYPE_HOLE) != 0)
2605 fnvlist_add_boolean(res, path);
2606
2607 free(path);
2608 return;
2609 }
2610
2611 for (i = 0; i < children; i++) {
2612 zpool_collect_leaves(zhp, child[i], res);
2613 }
2614 }
2615
2616 int
2617 zpool_trim_one(zpool_handle_t *zhp, void *data)
2618 {
2619 int error;
2620 libzfs_handle_t *hdl = zpool_get_handle(zhp);
2621 const char *pool_name = zpool_get_name(zhp);
2622 if (zpool_open_silent(hdl, pool_name, &zhp) != 0)
2623 return (-1);
2624
2625 trim_cbdata_t *cb = data;
2626 nvlist_t *vdevs = fnvlist_alloc();
2627
2628 /* no individual leaf vdevs specified, so add them all */
2629 nvlist_t *config = zpool_get_config(zhp, NULL);
2630 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
2631 ZPOOL_CONFIG_VDEV_TREE);
2632
2633 zpool_collect_leaves(zhp, nvroot, vdevs);
2634 error = zpool_trim(zhp, cb->cmd_type, vdevs, &cb->trim_flags);
2635 fnvlist_free(vdevs);
2636
2637 return (error);
2638 }
2639
2640 static int
2641 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2642 {
2643 int err;
2644 nvpair_t *elem;
2645
2646 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2647 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2648
2649 uint64_t guid = fnvpair_value_uint64(elem);
2650
2651 err = lzc_wait_tag(zhp->zpool_name,
2652 ZPOOL_WAIT_TRIM, guid, NULL);
2653 if (err != 0) {
2654 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2655 err, dgettext(TEXT_DOMAIN, "error "
2656 "waiting to trim '%s'"), nvpair_name(elem));
2657
2658 return (err);
2659 }
2660 }
2661 return (0);
2662 }
2663
2664 /*
2665 * Check errlist and report any errors, omitting ones which should be
2666 * suppressed. Returns B_TRUE if any errors were reported.
2667 */
2668 static boolean_t
2669 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
2670 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
2671 {
2672 nvpair_t *elem;
2673 boolean_t reported_errs = B_FALSE;
2674 int num_vds = 0;
2675 int num_suppressed_errs = 0;
2676
2677 for (elem = nvlist_next_nvpair(vds, NULL);
2678 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
2679 num_vds++;
2680 }
2681
2682 for (elem = nvlist_next_nvpair(errlist, NULL);
2683 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
2684 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2685 const char *path;
2686
2687 /*
2688 * If only the pool was specified, and it was not a secure
2689 * trim, then suppress warnings for individual vdevs which
2690 * do not support trimming.
2691 */
2692 if (vd_error == EZFS_TRIM_NOTSUP &&
2693 trim_flags->fullpool &&
2694 !trim_flags->secure) {
2695 num_suppressed_errs++;
2696 continue;
2697 }
2698
2699 reported_errs = B_TRUE;
2700 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2701 &path) != 0)
2702 path = nvpair_name(elem);
2703
2704 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2705 "cannot trim '%s'", path);
2706 }
2707
2708 if (num_suppressed_errs == num_vds) {
2709 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2710 "no devices in pool support trim operations"));
2711 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
2712 dgettext(TEXT_DOMAIN, "cannot trim")));
2713 reported_errs = B_TRUE;
2714 }
2715
2716 return (reported_errs);
2717 }
2718
2719 /*
2720 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2721 * the given vdevs in the given pool.
2722 */
2723 int
2724 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
2725 trimflags_t *trim_flags)
2726 {
2727 int err;
2728 int retval = 0;
2729
2730 nvlist_t *vdev_guids = fnvlist_alloc();
2731 nvlist_t *guids_to_paths = fnvlist_alloc();
2732 nvlist_t *errlist = NULL;
2733
2734 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2735 guids_to_paths, &errlist);
2736 if (err != 0) {
2737 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
2738 retval = -1;
2739 goto out;
2740 }
2741
2742 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
2743 trim_flags->secure, vdev_guids, &errlist);
2744 if (err != 0) {
2745 nvlist_t *vd_errlist;
2746 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2747 ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
2748 if (check_trim_errs(zhp, trim_flags, guids_to_paths,
2749 vds, vd_errlist)) {
2750 retval = -1;
2751 goto out;
2752 }
2753 } else {
2754 char errbuf[ERRBUFLEN];
2755
2756 (void) snprintf(errbuf, sizeof (errbuf),
2757 dgettext(TEXT_DOMAIN, "operation failed"));
2758 zpool_standard_error(zhp->zpool_hdl, err, errbuf);
2759 retval = -1;
2760 goto out;
2761 }
2762 }
2763
2765 if (trim_flags->wait)
2766 retval = zpool_trim_wait(zhp, vdev_guids);
2767
2768 out:
2769 if (errlist != NULL)
2770 fnvlist_free(errlist);
2771 fnvlist_free(vdev_guids);
2772 fnvlist_free(guids_to_paths);
2773 return (retval);
2774 }
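
/*
 * Example (sketch): TRIM all leaf vdevs in a pool. The trimflags_t
 * initialization mirrors what the zpool(8) command does; the field
 * values are illustrative and error handling is elided.
 *
 *	trimflags_t flags = { .rate = 0, .secure = B_FALSE,
 *	    .wait = B_FALSE, .fullpool = B_TRUE };
 *	nvlist_t *vdevs = fnvlist_alloc();
 *	nvlist_t *nvroot = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
 *	    ZPOOL_CONFIG_VDEV_TREE);
 *	zpool_collect_leaves(zhp, nvroot, vdevs);
 *	(void) zpool_trim(zhp, POOL_TRIM_START, vdevs, &flags);
 *	fnvlist_free(vdevs);
 */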
2775
2776 /*
2777 * Scan the pool.
2778 */
2779 int
2780 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
2781 return (zpool_scan_range(zhp, func, cmd, 0, 0));
2782 }
2783
2784 int
2785 zpool_scan_range(zpool_handle_t *zhp, pool_scan_func_t func,
2786 pool_scrub_cmd_t cmd, time_t date_start, time_t date_end)
2787 {
2788 char errbuf[ERRBUFLEN];
2789 int err;
2790 libzfs_handle_t *hdl = zhp->zpool_hdl;
2791
2792 nvlist_t *args = fnvlist_alloc();
2793 fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
2794 fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);
2795 fnvlist_add_uint64(args, "scan_date_start", (uint64_t)date_start);
2796 fnvlist_add_uint64(args, "scan_date_end", (uint64_t)date_end);
2797
2798 err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
2799 fnvlist_free(args);
2800
2801 if (err == 0) {
2802 return (0);
2803 } else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
2804 zfs_cmd_t zc = {"\0"};
2805 (void) strlcpy(zc.zc_name, zhp->zpool_name,
2806 sizeof (zc.zc_name));
2807 zc.zc_cookie = func;
2808 zc.zc_flags = cmd;
2809
2810 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
2811 return (0);
2812 }
2813
2814 /*
2815 * An ECANCELED on a scrub means one of the following:
2816 * 1. we resumed a paused scrub.
2817 * 2. we resumed a paused error scrub.
2818 * 3. Error scrub is not run because of no error log.
2819 *
2820 * Note that we no longer return ECANCELED in case 1 or 2. However, in
2821 * order to prevent problems where we have a newer userland than
2822 * kernel, we keep this check in place. That prevents erroneous
2823 * failures when an older kernel returns ECANCELED in those cases.
2824 */
2825 if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
2826 func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
2827 return (0);
2828 /*
2829 * The following case is handled here:
2830 * 1. Pausing a scrub/error scrub when none is in progress (succeeds).
2831 */
2832 if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
2833 POOL_SCRUB_PAUSE) {
2834 return (0);
2835 }
2836
2837 ASSERT3U(func, >=, POOL_SCAN_NONE);
2838 ASSERT3U(func, <, POOL_SCAN_FUNCS);
2839
2840 if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
2841 if (cmd == POOL_SCRUB_PAUSE) {
2842 (void) snprintf(errbuf, sizeof (errbuf),
2843 dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
2844 zhp->zpool_name);
2845 } else {
2846 assert(cmd == POOL_SCRUB_NORMAL);
2847 (void) snprintf(errbuf, sizeof (errbuf),
2848 dgettext(TEXT_DOMAIN, "cannot scrub %s"),
2849 zhp->zpool_name);
2850 }
2851 } else if (func == POOL_SCAN_RESILVER) {
2852 assert(cmd == POOL_SCRUB_NORMAL);
2853 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2854 "cannot restart resilver on %s"), zhp->zpool_name);
2855 } else if (func == POOL_SCAN_NONE) {
2856 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2857 "cannot cancel scrubbing %s"), zhp->zpool_name);
2858 } else {
2859 assert(!"unexpected result");
2860 }
2861
2862 /*
2863 * With EBUSY, six cases are possible:
2864 *
2865 * Current state Requested
2866 * 1. Normal Scrub Running Normal Scrub or Error Scrub
2867 * 2. Normal Scrub Paused Error Scrub
2868 * 3. Normal Scrub Paused Pause Normal Scrub
2869 * 4. Error Scrub Running Normal Scrub or Error Scrub
2870 * 5. Error Scrub Paused Pause Error Scrub
2871 * 6. Resilvering Anything else
2872 */
2873 if (err == EBUSY) {
2874 nvlist_t *nvroot;
2875 pool_scan_stat_t *ps = NULL;
2876 uint_t psc;
2877
2878 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
2879 ZPOOL_CONFIG_VDEV_TREE);
2880 (void) nvlist_lookup_uint64_array(nvroot,
2881 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
2882 if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
2883 ps->pss_state == DSS_SCANNING) {
2884 if (ps->pss_pass_scrub_pause == 0) {
2885 /* handles case 1 */
2886 assert(cmd == POOL_SCRUB_NORMAL);
2887 return (zfs_error(hdl, EZFS_SCRUBBING,
2888 errbuf));
2889 } else {
2890 if (func == POOL_SCAN_ERRORSCRUB) {
2891 /* handles case 2 */
2892 ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
2893 return (zfs_error(hdl,
2894 EZFS_SCRUB_PAUSED_TO_CANCEL,
2895 errbuf));
2896 } else {
2897 /* handles case 3 */
2898 ASSERT3U(func, ==, POOL_SCAN_SCRUB);
2899 ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
2900 return (zfs_error(hdl,
2901 EZFS_SCRUB_PAUSED, errbuf));
2902 }
2903 }
2904 } else if (ps &&
2905 ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
2906 ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
2907 if (ps->pss_pass_error_scrub_pause == 0) {
2908 /* handles case 4 */
2909 ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
2910 return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
2911 errbuf));
2912 } else {
2913 /* handles case 5 */
2914 ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
2915 ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
2916 return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
2917 errbuf));
2918 }
2919 } else {
2920 /* handles case 6 */
2921 return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
2922 }
2923 } else if (err == ENOENT) {
2924 return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
2925 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
2926 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
2927 } else {
2928 return (zpool_standard_error(hdl, err, errbuf));
2929 }
2930 }
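
/*
 * Example (sketch): start, pause, and cancel a scrub through the
 * zpool_scan() wrapper above; error handling is elided.
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 */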
2931
2932 /*
2933 * Find a vdev that matches the search criteria specified. We use the
2934 * nvpair name to determine how we should look for the device.
2935 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2936 * spare, but FALSE if it's an INUSE spare.
2937 *
2938 * If 'return_parent' is set, then return the *parent* of the vdev you're
2939 * searching for rather than the vdev itself.
2940 */
2941 static nvlist_t *
2942 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2943 boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
2944 {
2945 uint_t c, children;
2946 nvlist_t **child;
2947 nvlist_t *ret;
2948 uint64_t is_log;
2949 const char *srchkey;
2950 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2951 const char *tmp = NULL;
2952 boolean_t is_root;
2953
2954 /* Nothing to look for */
2955 if (search == NULL || pair == NULL)
2956 return (NULL);
2957
2958 /* Obtain the key we will use to search */
2959 srchkey = nvpair_name(pair);
2960
2961 nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
2962 if (strcmp(tmp, "root") == 0)
2963 is_root = B_TRUE;
2964 else
2965 is_root = B_FALSE;
2966
2967 switch (nvpair_type(pair)) {
2968 case DATA_TYPE_UINT64:
2969 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2970 uint64_t srchval = fnvpair_value_uint64(pair);
2971 uint64_t theguid = fnvlist_lookup_uint64(nv,
2972 ZPOOL_CONFIG_GUID);
2973 if (theguid == srchval)
2974 return (nv);
2975 }
2976 break;
2977
2978 case DATA_TYPE_STRING: {
2979 const char *srchval, *val;
2980
2981 srchval = fnvpair_value_string(pair);
2982 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2983 break;
2984
2985 /*
2986 * Search for the requested value. Special cases:
2987 *
2988 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2989 * "-part1", or "p1". The suffix is hidden from the user,
2990 * but included in the string, so this matches around it.
2991 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2992 * is used to check all possible expanded paths.
2993 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2994 *
2995 * Otherwise, all other searches are simple string compares.
2996 */
2997 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2998 uint64_t wholedisk = 0;
2999
3000 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3001 &wholedisk);
3002 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
3003 return (nv);
3004
3005 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
3006 char *type, *idx, *end, *p;
3007 uint64_t id, vdev_id;
3008
3009 /*
3010 * Determine our vdev type, keeping in mind
3011 * that the srchval is composed of a type and
3012 * vdev id pair (i.e. mirror-4).
3013 */
3014 if ((type = strdup(srchval)) == NULL)
3015 return (NULL);
3016
3017 if ((p = strrchr(type, '-')) == NULL) {
3018 free(type);
3019 break;
3020 }
3021 idx = p + 1;
3022 *p = '\0';
3023
3024 /*
3025 * draid names are presented like: draid2:4d:6c:0s
3026 * We match them up to the first ':' so we can still
3027 * do the parity check below, but the other params
3028 * are ignored.
3029 */
3030 if ((p = strchr(type, ':')) != NULL) {
3031 if (strncmp(type, VDEV_TYPE_DRAID,
3032 strlen(VDEV_TYPE_DRAID)) == 0)
3033 *p = '\0';
3034 }
3035
3036 /*
3037 * If the types don't match then keep looking.
3038 */
3039 if (strncmp(val, type, strlen(val)) != 0) {
3040 free(type);
3041 break;
3042 }
3043
3044 verify(zpool_vdev_is_interior(type));
3045
3046 id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
3047 errno = 0;
3048 vdev_id = strtoull(idx, &end, 10);
3049
3050 /*
3051 * If we are looking for a raidz and a parity is
3052 * specified, make sure it matches.
3053 */
3054 int rzlen = strlen(VDEV_TYPE_RAIDZ);
3055 assert(rzlen == strlen(VDEV_TYPE_DRAID));
3056 int typlen = strlen(type);
3057 if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
3058 strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
3059 typlen != rzlen) {
3060 uint64_t vdev_parity;
3061 int parity = *(type + rzlen) - '0';
3062
3063 if (parity <= 0 || parity > 3 ||
3064 (typlen - rzlen) != 1) {
3065 /*
3066 * Nonsense parity specified, can
3067 * never match
3068 */
3069 free(type);
3070 return (NULL);
3071 }
3072 vdev_parity = fnvlist_lookup_uint64(nv,
3073 ZPOOL_CONFIG_NPARITY);
3074 if ((int)vdev_parity != parity) {
3075 free(type);
3076 break;
3077 }
3078 }
3079
3080 free(type);
3081 if (errno != 0)
3082 return (NULL);
3083
3084 /*
3085 * Now verify that we have the correct vdev id.
3086 */
3087 if (vdev_id == id)
3088 return (nv);
3089 }
3090
3091 /*
3092 * Common case
3093 */
3094 if (strcmp(srchval, val) == 0)
3095 return (nv);
3096 break;
3097 }
3098
3099 default:
3100 break;
3101 }
3102
3103 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3104 &child, &children) != 0)
3105 return (NULL);
3106
3107 for (c = 0; c < children; c++) {
3108 if ((ret = vdev_to_nvlist_iter(child[c], search,
3109 avail_spare, l2cache, NULL, return_parent)) != NULL) {
3110 /*
3111 * The 'is_log' value is only set for the toplevel
3112 * vdev, not the leaf vdevs. So we always lookup the
3113 * log device from the root of the vdev tree (where
3114 * 'log' is non-NULL).
3115 */
3116 if (log != NULL &&
3117 nvlist_lookup_uint64(child[c],
3118 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
3119 is_log) {
3120 *log = B_TRUE;
3121 }
3122 return (ret && return_parent && !is_root ? nv : ret);
3123 }
3124 }
3125
3126 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3127 &child, &children) == 0) {
3128 for (c = 0; c < children; c++) {
3129 if ((ret = vdev_to_nvlist_iter(child[c], search,
3130 avail_spare, l2cache, NULL, return_parent))
3131 != NULL) {
3132 *avail_spare = B_TRUE;
3133 return (ret && return_parent &&
3134 !is_root ? nv : ret);
3135 }
3136 }
3137 }
3138
3139 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3140 &child, &children) == 0) {
3141 for (c = 0; c < children; c++) {
3142 if ((ret = vdev_to_nvlist_iter(child[c], search,
3143 avail_spare, l2cache, NULL, return_parent))
3144 != NULL) {
3145 *l2cache = B_TRUE;
3146 return (ret && return_parent &&
3147 !is_root ? nv : ret);
3148 }
3149 }
3150 }
3151
3152 return (NULL);
3153 }
3154
3155 /*
3156 * Given a physical path or guid, find the associated vdev.
3157 */
3158 nvlist_t *
3159 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
3160 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3161 {
3162 nvlist_t *search, *nvroot, *ret;
3163 uint64_t guid;
3164 char *end;
3165
3166 search = fnvlist_alloc();
3167
3168 guid = strtoull(ppath, &end, 0);
3169 if (guid != 0 && *end == '\0') {
3170 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3171 } else {
3172 fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
3173 }
3174
3175 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3176 ZPOOL_CONFIG_VDEV_TREE);
3177
3178 *avail_spare = B_FALSE;
3179 *l2cache = B_FALSE;
3180 if (log != NULL)
3181 *log = B_FALSE;
3182 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3183 B_FALSE);
3184 fnvlist_free(search);
3185
3186 return (ret);
3187 }
3188
3189 /*
3190 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
3191 */
3192 static boolean_t
3193 zpool_vdev_is_interior(const char *name)
3194 {
3195 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
3196 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
3197 strncmp(name,
3198 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
3199 strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
3200 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
3201 return (B_TRUE);
3202
3203 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
3204 !zpool_is_draid_spare(name))
3205 return (B_TRUE);
3206
3207 return (B_FALSE);
3208 }
3209
3210 /*
3211 * Lookup the nvlist for a given vdev or vdev's parent (depending on
3212 * if 'return_parent' is set).
3213 */
3214 static nvlist_t *
3215 __zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3216 boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
3217 {
3218 char *end;
3219 nvlist_t *nvroot, *search, *ret;
3220 uint64_t guid;
3221 boolean_t __avail_spare, __l2cache, __log;
3222
3223 search = fnvlist_alloc();
3224
3225 guid = strtoull(path, &end, 0);
3226 if (guid != 0 && *end == '\0') {
3227 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3228 } else if (zpool_vdev_is_interior(path)) {
3229 fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
3230 } else {
3231 fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
3232 }
3233
3234 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3235 ZPOOL_CONFIG_VDEV_TREE);
3236
3237 /*
3238 * User can pass NULL for avail_spare, l2cache, and log, but
3239 * we still need to provide variables to vdev_to_nvlist_iter(), so
3240 * just point them to junk variables here.
3241 */
3242 if (!avail_spare)
3243 avail_spare = &__avail_spare;
3244 if (!l2cache)
3245 l2cache = &__l2cache;
3246 if (!log)
3247 log = &__log;
3248
3249 *avail_spare = B_FALSE;
3250 *l2cache = B_FALSE;
3251 if (log != NULL)
3252 *log = B_FALSE;
3253 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3254 return_parent);
3255 fnvlist_free(search);
3256
3257 return (ret);
3258 }
3259
3260 nvlist_t *
3261 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3262 boolean_t *l2cache, boolean_t *log)
3263 {
3264 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3265 B_FALSE));
3266 }
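
/*
 * Example (sketch): look up a vdev by path (a GUID string or an interior
 * name such as "mirror-0" also works) and check how it is being used;
 * the device path is illustrative.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt = zpool_find_vdev(zhp, "/dev/sda",
 *	    &spare, &l2cache, &log);
 *	if (tgt != NULL && !spare && !l2cache) {
 *		uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
 *		... operate on the vdev ...
 *	}
 */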
3267
3268 /* Given a vdev path, return its parent's nvlist */
3269 nvlist_t *
3270 zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,
3271 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3272 {
3273 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3274 B_TRUE));
3275 }
3276
3277 /*
3278 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
3279 *
3280 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
3281 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
3282 * ignore them.
3283 */
3284 static uint64_t
3285 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
3286 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
3287 {
3288 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
3289 nvlist_t *tgt;
3290
3291 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
3292 &log)) == NULL)
3293 return (0);
3294
3295 if (is_spare != NULL)
3296 *is_spare = spare;
3297 if (is_l2cache != NULL)
3298 *is_l2cache = l2cache;
3299 if (is_log != NULL)
3300 *is_log = log;
3301
3302 return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
3303 }
3304
3305 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
3306 uint64_t
3307 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
3308 {
3309 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
3310 }
3311
3312 /*
3313 * Bring the specified vdev online. The 'flags' parameter is a set of the
3314 * ZFS_ONLINE_* flags.
3315 */
3316 int
3317 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
3318 vdev_state_t *newstate)
3319 {
3320 zfs_cmd_t zc = {"\0"};
3321 char errbuf[ERRBUFLEN];
3322 nvlist_t *tgt;
3323 boolean_t avail_spare, l2cache, islog;
3324 libzfs_handle_t *hdl = zhp->zpool_hdl;
3325
3326 if (flags & ZFS_ONLINE_EXPAND) {
3327 (void) snprintf(errbuf, sizeof (errbuf),
3328 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
3329 } else {
3330 (void) snprintf(errbuf, sizeof (errbuf),
3331 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
3332 }
3333
3334 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3335 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3336 &islog)) == NULL)
3337 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3338
3339 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3340
3341 if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
3342 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3343
3344 #ifndef __FreeBSD__
3345 const char *pathname;
3346 if ((flags & ZFS_ONLINE_EXPAND ||
3347 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
3348 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
3349 uint64_t wholedisk = 0;
3350
3351 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
3352 &wholedisk);
3353
3354 /*
3355 * XXX - L2ARC 1.0 devices can't support expansion.
3356 */
3357 if (l2cache) {
3358 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3359 "cannot expand cache devices"));
3360 return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
3361 }
3362
3363 if (wholedisk) {
3364 const char *fullpath = path;
3365 char buf[MAXPATHLEN];
3366 int error;
3367
3368 if (path[0] != '/') {
3369 error = zfs_resolve_shortname(path, buf,
3370 sizeof (buf));
3371 if (error != 0)
3372 return (zfs_error(hdl, EZFS_NODEVICE,
3373 errbuf));
3374
3375 fullpath = buf;
3376 }
3377
3378 error = zpool_relabel_disk(hdl, fullpath, errbuf);
3379 if (error != 0)
3380 return (error);
3381 }
3382 }
3383 #endif
3384
3385 zc.zc_cookie = VDEV_STATE_ONLINE;
3386 zc.zc_obj = flags;
3387
3388 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
3389 if (errno == EINVAL) {
3390 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
3391 "from this pool into a new one. Use '%s' "
3392 "instead"), "zpool detach");
3393 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
3394 }
3395 return (zpool_standard_error(hdl, errno, errbuf));
3396 }
3397
3398 *newstate = zc.zc_cookie;
3399 return (0);
3400 }
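
/*
 * Example (sketch): bring a device online and request expansion, as
 * 'zpool online -e' does; the device name is illustrative and error
 * handling is elided.
 *
 *	vdev_state_t newstate;
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		... the device is online and expanded ...
 */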
3401
3402 /*
3403 * Take the specified vdev offline
3404 */
3405 int
3406 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
3407 {
3408 zfs_cmd_t zc = {"\0"};
3409 char errbuf[ERRBUFLEN];
3410 nvlist_t *tgt;
3411 boolean_t avail_spare, l2cache;
3412 libzfs_handle_t *hdl = zhp->zpool_hdl;
3413
3414 (void) snprintf(errbuf, sizeof (errbuf),
3415 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
3416
3417 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3418 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3419 NULL)) == NULL)
3420 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3421
3422 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3423
3424 if (avail_spare)
3425 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3426
3427 zc.zc_cookie = VDEV_STATE_OFFLINE;
3428 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3429
3430 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3431 return (0);
3432
3433 switch (errno) {
3434 case EBUSY:
3435
3436 /*
3437 * There are no other replicas of this device.
3438 */
3439 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
3440
3441 case EEXIST:
3442 /*
3443 * The log device has unplayed logs
3444 */
3445 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
3446
3447 default:
3448 return (zpool_standard_error(hdl, errno, errbuf));
3449 }
3450 }
3451
3452 /*
3453 * Remove the specified vdev asynchronously from the configuration, so
3454 * that it may come ONLINE if reinserted. This is called from zed on
3455 * a udev remove event.
3456 * Note: We also have a similar function zpool_vdev_remove() that
3457 * removes the vdev from the pool.
3458 */
3459 int
3460 zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
3461 {
3462 zfs_cmd_t zc = {"\0"};
3463 char errbuf[ERRBUFLEN];
3464 nvlist_t *tgt;
3465 boolean_t avail_spare, l2cache;
3466 libzfs_handle_t *hdl = zhp->zpool_hdl;
3467
3468 (void) snprintf(errbuf, sizeof (errbuf),
3469 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3470
3471 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3472 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3473 NULL)) == NULL)
3474 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3475
3476 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3477
3478 zc.zc_cookie = VDEV_STATE_REMOVED;
3479
3480 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3481 return (0);
3482
3483 return (zpool_standard_error(hdl, errno, errbuf));
3484 }
3485
3486 /*
3487 * Mark the given vdev faulted.
3488 */
3489 int
3490 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3491 {
3492 zfs_cmd_t zc = {"\0"};
3493 char errbuf[ERRBUFLEN];
3494 libzfs_handle_t *hdl = zhp->zpool_hdl;
3495
3496 (void) snprintf(errbuf, sizeof (errbuf),
3497 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3498
3499 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3500 zc.zc_guid = guid;
3501 zc.zc_cookie = VDEV_STATE_FAULTED;
3502 zc.zc_obj = aux;
3503
3504 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3505 return (0);
3506
3507 switch (errno) {
3508 case EBUSY:
3509
3510 /*
3511 * There are no other replicas of this device.
3512 */
3513 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
3514
3515 default:
3516 return (zpool_standard_error(hdl, errno, errbuf));
3517 }
3518
3519 }
3520
3521 /*
3522 * Generic set vdev state function
3523 */
3524 static int
3525 zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
3526 vdev_state_t state)
3527 {
3528 zfs_cmd_t zc = {"\0"};
3529 char errbuf[ERRBUFLEN];
3530 libzfs_handle_t *hdl = zhp->zpool_hdl;
3531
3532 (void) snprintf(errbuf, sizeof (errbuf),
3533 dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
3534 zpool_state_to_name(state, aux), (u_longlong_t)guid);
3535
3536 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3537 zc.zc_guid = guid;
3538 zc.zc_cookie = state;
3539 zc.zc_obj = aux;
3540
3541 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3542 return (0);
3543
3544 return (zpool_standard_error(hdl, errno, errbuf));
3545 }
3546
3547 /*
3548 * Mark the given vdev degraded.
3549 */
3550 int
3551 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3552 {
3553 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
3554 }
3555
3556 /*
3557 * Mark the given vdev as in a removed state (as if the device does not exist).
3558 *
3559 * This is different from zpool_vdev_remove(), which removes a device
3560 * that does exist from the pool.
3561 */
3562 int
3563 zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3564 {
3565 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
3566 }
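
/*
 * Example (sketch): fault a device by GUID, similar to what zed does when
 * a device exceeds its error thresholds. VDEV_AUX_ERR_EXCEEDED is one
 * possible aux reason; the device path is illustrative.
 *
 *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda1");
 *	if (guid != 0)
 *		(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_ERR_EXCEEDED);
 */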
3567
3568 /*
3569 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3570 * a hot spare.
3571 */
3572 static boolean_t
3573 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3574 {
3575 nvlist_t **child;
3576 uint_t c, children;
3577
3578 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3579 &children) == 0) {
3580 const char *type = fnvlist_lookup_string(search,
3581 ZPOOL_CONFIG_TYPE);
3582 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
3583 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
3584 children == 2 && child[which] == tgt)
3585 return (B_TRUE);
3586
3587 for (c = 0; c < children; c++)
3588 if (is_replacing_spare(child[c], tgt, which))
3589 return (B_TRUE);
3590 }
3591
3592 return (B_FALSE);
3593 }
3594
3595 /*
3596 * Attach new_disk (fully described by nvroot) to old_disk.
3597 * If 'replacing' is specified, the new disk will replace the old one.
3598 */
3599 int
3600 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
3601 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
3602 {
3603 zfs_cmd_t zc = {"\0"};
3604 char errbuf[ERRBUFLEN];
3605 int ret;
3606 nvlist_t *tgt;
3607 boolean_t avail_spare, l2cache, islog;
3608 uint64_t val;
3609 char *newname;
3610 const char *type;
3611 nvlist_t **child;
3612 uint_t children;
3613 nvlist_t *config_root;
3614 libzfs_handle_t *hdl = zhp->zpool_hdl;
3615
3616 if (replacing)
3617 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3618 "cannot replace %s with %s"), old_disk, new_disk);
3619 else
3620 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3621 "cannot attach %s to %s"), new_disk, old_disk);
3622
3623 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3624 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3625 &islog)) == NULL)
3626 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3627
3628 if (avail_spare)
3629 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3630
3631 if (l2cache)
3632 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
3633
3634 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3635 zc.zc_cookie = replacing;
3636 zc.zc_simple = rebuild;
3637
3638 if (rebuild &&
3639 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
3640 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3641 "the loaded zfs module doesn't support device rebuilds"));
3642 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3643 }
3644
3645 type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);
3646 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&
3647 zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {
3648 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3649 "the loaded zfs module doesn't support raidz expansion"));
3650 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3651 }
3652
3653 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3654 &child, &children) != 0 || children != 1) {
3655 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3656 "new device must be a single disk"));
3657 return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
3658 }
3659
3660 config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3661 ZPOOL_CONFIG_VDEV_TREE);
3662
3663 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3664 return (-1);
3665
3666 /*
3667 * If the target is a hot spare that has been swapped in, we can only
3668 * replace it with another hot spare.
3669 */
3670 if (replacing &&
3671 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3672 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3673 NULL) == NULL || !avail_spare) &&
3674 is_replacing_spare(config_root, tgt, 1)) {
3675 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3676 "can only be replaced by another hot spare"));
3677 free(newname);
3678 return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
3679 }
3680
3681 free(newname);
3682
3683 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
3684
3685 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3686
3687 zcmd_free_nvlists(&zc);
3688
3689 if (ret == 0)
3690 return (0);
3691
3692 switch (errno) {
3693 case ENOTSUP:
3694 /*
3695 * Can't attach to or replace this type of vdev.
3696 */
3697 if (replacing) {
3698 uint64_t version = zpool_get_prop_int(zhp,
3699 ZPOOL_PROP_VERSION, NULL);
3700
3701 if (islog) {
3702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3703 "cannot replace a log with a spare"));
3704 } else if (rebuild) {
3705 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3706 "only mirror and dRAID vdevs support "
3707 "sequential reconstruction"));
3708 } else if (zpool_is_draid_spare(new_disk)) {
3709 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3710 "dRAID spares can only replace child "
3711 "devices in their parent's dRAID vdev"));
3712 } else if (version >= SPA_VERSION_MULTI_REPLACE) {
3713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3714 "already in replacing/spare config; wait "
3715 "for completion or use 'zpool detach'"));
3716 } else {
3717 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3718 "cannot replace a replacing device"));
3719 }
3720 } else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3721 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3722 "raidz_expansion feature must be enabled "
3723 "in order to attach a device to raidz"));
3724 } else {
3725 char status[64] = {0};
3726 zpool_prop_get_feature(zhp,
3727 "feature@device_rebuild", status, 63);
3728 if (rebuild &&
3729 strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
3730 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3731 "device_rebuild feature must be enabled "
3732 "in order to use sequential "
3733 "reconstruction"));
3734 } else {
3735 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3736 "can only attach to mirrors and top-level "
3737 "disks"));
3738 }
3739 }
3740 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3741 break;
3742
3743 case EINVAL:
3744 /*
3745 * The new device must be a single disk.
3746 */
3747 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3748 "new device must be a single disk"));
3749 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3750 break;
3751
3752 case EBUSY:
3753 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
3754 new_disk);
3755 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3756 break;
3757
3758 case EOVERFLOW:
3759 /*
3760 * The new device is too small.
3761 */
3762 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3763 "device is too small"));
3764 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3765 break;
3766
3767 case EDOM:
3768 /*
3769 * The new device has a different optimal sector size.
3770 */
3771 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3772 "new device has a different optimal sector size; use the "
3773 "option '-o ashift=N' to override the optimal size"));
3774 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3775 break;
3776
3777 case ENAMETOOLONG:
3778 /*
3779 * The resulting top-level vdev spec won't fit in the label.
3780 */
3781 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3782 break;
3783
3784 case ENXIO:
3785 /*
3786 * The existing raidz vdev has offline children
3787 */
3788 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3789 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3790 "raidz vdev has devices that are are offline or "
3791 "being replaced"));
3792 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3793 break;
3794 } else {
3795 (void) zpool_standard_error(hdl, errno, errbuf);
3796 }
3797 break;
3798
3799 case EADDRINUSE:
3800 /*
3801 * The boot reserved area is already being used (FreeBSD)
3802 */
3803 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3804 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3805 "the reserved boot area needed for the expansion "
3806 "is already being used by a boot loader"));
3807 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3808 } else {
3809 (void) zpool_standard_error(hdl, errno, errbuf);
3810 }
3811 break;
3812
3813 case ZFS_ERR_ASHIFT_MISMATCH:
3814 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3815 "The new device cannot have a higher alignment requirement "
3816 "than the top-level vdev."));
3817 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3818 break;
3819 default:
3820 (void) zpool_standard_error(hdl, errno, errbuf);
3821 }
3822
3823 return (-1);
3824 }
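
/*
 * Illustrative caller sketch (not part of this file): replacing one disk
 * with another via zpool_vdev_attach(), assuming the prototype declared in
 * libzfs.h.  'nvroot' is a single-disk vdev tree built elsewhere (e.g. by
 * the zpool command's make_root_vdev()); the device names are hypothetical.
 *
 *	nvlist_t *nvroot = ...;	 // vdev tree describing the new disk
 *
 *	// replacing=1 swaps the old disk out; rebuild=B_FALSE uses a
 *	// traditional resilver rather than sequential reconstruction.
 *	if (zpool_vdev_attach(zhp, "sda", "sdb", nvroot, 1, B_FALSE) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */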
3825
3826 /*
3827 * Detach the specified device.
3828 */
3829 int
3830 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3831 {
3832 zfs_cmd_t zc = {"\0"};
3833 char errbuf[ERRBUFLEN];
3834 nvlist_t *tgt;
3835 boolean_t avail_spare, l2cache;
3836 libzfs_handle_t *hdl = zhp->zpool_hdl;
3837
3838 (void) snprintf(errbuf, sizeof (errbuf),
3839 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3840
3841 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3842 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3843 NULL)) == NULL)
3844 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3845
3846 if (avail_spare)
3847 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3848
3849 if (l2cache)
3850 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
3851
3852 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3853
3854 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3855 return (0);
3856
3857 switch (errno) {
3858
3859 case ENOTSUP:
3860 /*
3861 * Can't detach from this type of vdev.
3862 */
3863 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3864 "applicable to mirror and replacing vdevs"));
3865 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3866 break;
3867
3868 case EBUSY:
3869 /*
3870 * There are no other replicas of this device.
3871 */
3872 (void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
3873 break;
3874
3875 default:
3876 (void) zpool_standard_error(hdl, errno, errbuf);
3877 }
3878
3879 return (-1);
3880 }
3881
3882 /*
3883 * Find a mirror vdev in the source nvlist.
3884 *
3885 * The mchild array contains a list of disks in one of the top-level mirrors
3886 * of the source pool. The schild array contains a list of disks that the
3887 * user specified on the command line. We loop over the mchild array to
3888 * see if any entry in the schild array matches.
3889 *
3890 * If a disk in the mchild array is found in the schild array, we return
3891 * the index of that entry. Otherwise we return -1.
3892 */
3893 static int
3894 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3895 nvlist_t **schild, uint_t schildren)
3896 {
3897 uint_t mc;
3898
3899 for (mc = 0; mc < mchildren; mc++) {
3900 uint_t sc;
3901 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3902 mchild[mc], 0);
3903
3904 for (sc = 0; sc < schildren; sc++) {
3905 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3906 schild[sc], 0);
3907 boolean_t result = (strcmp(mpath, spath) == 0);
3908
3909 free(spath);
3910 if (result) {
3911 free(mpath);
3912 return (mc);
3913 }
3914 }
3915
3916 free(mpath);
3917 }
3918
3919 return (-1);
3920 }
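
/*
 * Worked example: if one top-level mirror's children resolve to
 * { "sda", "sdb" } and the user-specified list is { "sdb" }, the function
 * returns 1 (the index of "sdb" within mchild).  If no child of this
 * mirror was named, it returns -1 and zpool_vdev_split() falls back to
 * the mirror's last child.
 */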
3921
3922 /*
3923 * Split a mirror pool.  If '*newroot' is NULL, a new nvlist is
3924 * generated and it is the responsibility of the caller to free it.
3925 */
3926 int
3927 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3928 nvlist_t *props, splitflags_t flags)
3929 {
3930 zfs_cmd_t zc = {"\0"};
3931 char errbuf[ERRBUFLEN];
3932 const char *bias;
3933 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3934 nvlist_t **varray = NULL, *zc_props = NULL;
3935 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3936 libzfs_handle_t *hdl = zhp->zpool_hdl;
3937 uint64_t vers, readonly = B_FALSE;
3938 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3939 int retval = 0;
3940
3941 (void) snprintf(errbuf, sizeof (errbuf),
3942 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3943
3944 if (!zpool_name_valid(hdl, B_FALSE, newname))
3945 return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
3946
3947 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3948 (void) fprintf(stderr, gettext("Internal error: unable to "
3949 "retrieve pool configuration\n"));
3950 return (-1);
3951 }
3952
3953 tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
3954 vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3955
3956 if (props) {
3957 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3958 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3959 props, vers, flags, errbuf)) == NULL)
3960 return (-1);
3961 (void) nvlist_lookup_uint64(zc_props,
3962 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3963 if (readonly) {
3964 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3965 "property %s can only be set at import time"),
3966 zpool_prop_to_name(ZPOOL_PROP_READONLY));
3967 return (-1);
3968 }
3969 }
3970
3971 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3972 &children) != 0) {
3973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3974 "Source pool is missing vdev tree"));
3975 nvlist_free(zc_props);
3976 return (-1);
3977 }
3978
3979 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3980 vcount = 0;
3981
3982 if (*newroot == NULL ||
3983 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3984 &newchild, &newchildren) != 0)
3985 newchildren = 0;
3986
3987 for (c = 0; c < children; c++) {
3988 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3989 boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
3990 const char *type;
3991 nvlist_t **mchild, *vdev;
3992 uint_t mchildren;
3993 int entry;
3994
3995 /*
3996 * Unlike cache & spares, slogs are stored in the
3997 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3998 */
3999 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
4000 &is_log);
4001 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4002 &is_hole);
4003 if (is_log || is_hole) {
4004 /*
4005 * Create a hole vdev and put it in the config.
4006 */
4007 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
4008 goto out;
4009 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
4010 VDEV_TYPE_HOLE) != 0)
4011 goto out;
4012 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
4013 1) != 0)
4014 goto out;
4015 if (lastlog == 0)
4016 lastlog = vcount;
4017 varray[vcount++] = vdev;
4018 continue;
4019 }
4020 lastlog = 0;
4021 type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);
4022
4023 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
4024 vdev = child[c];
4025 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
4026 goto out;
4027 continue;
4028 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
4029 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4030 "Source pool must be composed only of mirrors\n"));
4031 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4032 goto out;
4033 }
4034
4035 if (nvlist_lookup_string(child[c],
4036 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
4037 if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
4038 is_special = B_TRUE;
4039 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
4040 is_dedup = B_TRUE;
4041 }
4042 verify(nvlist_lookup_nvlist_array(child[c],
4043 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
4044
4045 /* find or add an entry for this top-level vdev */
4046 if (newchildren > 0 &&
4047 (entry = find_vdev_entry(zhp, mchild, mchildren,
4048 newchild, newchildren)) >= 0) {
4049 /* We found a disk that the user specified. */
4050 vdev = mchild[entry];
4051 ++found;
4052 } else {
4053 /* User didn't specify a disk for this vdev. */
4054 vdev = mchild[mchildren - 1];
4055 }
4056
4057 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
4058 goto out;
4059
4060 if (flags.dryrun != 0) {
4061 if (is_dedup == B_TRUE) {
4062 if (nvlist_add_string(varray[vcount - 1],
4063 ZPOOL_CONFIG_ALLOCATION_BIAS,
4064 VDEV_ALLOC_BIAS_DEDUP) != 0)
4065 goto out;
4066 } else if (is_special == B_TRUE) {
4067 if (nvlist_add_string(varray[vcount - 1],
4068 ZPOOL_CONFIG_ALLOCATION_BIAS,
4069 VDEV_ALLOC_BIAS_SPECIAL) != 0)
4070 goto out;
4071 }
4072 }
4073 }
4074
4075 /* did we find every disk the user specified? */
4076 if (found != newchildren) {
4077 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
4078 "include at most one disk from each mirror"));
4079 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4080 goto out;
4081 }
4082
4083 /* Prepare the nvlist for populating. */
4084 if (*newroot == NULL) {
4085 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
4086 goto out;
4087 freelist = B_TRUE;
4088 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
4089 VDEV_TYPE_ROOT) != 0)
4090 goto out;
4091 } else {
4092 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
4093 }
4094
4095 /* Add all the children we found */
4096 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
4097 (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
4098 goto out;
4099
4100 /*
4101 * If we're just doing a dry run, exit now with success.
4102 */
4103 if (flags.dryrun) {
4104 memory_err = B_FALSE;
4105 freelist = B_FALSE;
4106 goto out;
4107 }
4108
4109 /* now build up the config list & call the ioctl */
4110 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
4111 goto out;
4112
4113 if (nvlist_add_nvlist(newconfig,
4114 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
4115 nvlist_add_string(newconfig,
4116 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
4117 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
4118 goto out;
4119
4120 /*
4121 * The new pool is automatically part of the namespace unless we
4122 * explicitly export it.
4123 */
4124 if (!flags.import)
4125 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
4126 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4127 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
4128 zcmd_write_conf_nvlist(hdl, &zc, newconfig);
4129 if (zc_props != NULL)
4130 zcmd_write_src_nvlist(hdl, &zc, zc_props);
4131
4132 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
4133 retval = zpool_standard_error(hdl, errno, errbuf);
4134 goto out;
4135 }
4136
4137 freelist = B_FALSE;
4138 memory_err = B_FALSE;
4139
4140 out:
4141 if (varray != NULL) {
4142 int v;
4143
4144 for (v = 0; v < vcount; v++)
4145 nvlist_free(varray[v]);
4146 free(varray);
4147 }
4148 zcmd_free_nvlists(&zc);
4149 nvlist_free(zc_props);
4150 nvlist_free(newconfig);
4151 if (freelist) {
4152 nvlist_free(*newroot);
4153 *newroot = NULL;
4154 }
4155
4156 if (retval != 0)
4157 return (retval);
4158
4159 if (memory_err)
4160 return (no_memory(hdl));
4161
4162 return (0);
4163 }
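
/*
 * Illustrative call sequence (a sketch, not part of the library): a dry
 * run first, to preview the vdev tree the split would produce, followed
 * by the real split.  The pool name here is hypothetical.
 *
 *	splitflags_t flags = { .dryrun = 1 };
 *	nvlist_t *newroot = NULL;
 *
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		nvlist_free(newroot);	// caller owns the generated tree
 *		newroot = NULL;
 *		flags.dryrun = 0;
 *		(void) zpool_vdev_split(zhp, "newpool", &newroot, NULL,
 *		    flags);
 *	}
 */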
4164
4165 /*
4166 * Remove the given device.
4167 */
4168 int
4169 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
4170 {
4171 zfs_cmd_t zc = {"\0"};
4172 char errbuf[ERRBUFLEN];
4173 nvlist_t *tgt;
4174 boolean_t avail_spare, l2cache, islog;
4175 libzfs_handle_t *hdl = zhp->zpool_hdl;
4176 uint64_t version;
4177
4178 (void) snprintf(errbuf, sizeof (errbuf),
4179 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
4180
4181 if (zpool_is_draid_spare(path)) {
4182 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4183 "dRAID spares cannot be removed"));
4184 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4185 }
4186
4187 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4188 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
4189 &islog)) == NULL)
4190 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4191
4192 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
4193 if (islog && version < SPA_VERSION_HOLES) {
4194 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4195 "pool must be upgraded to support log removal"));
4196 return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
4197 }
4198
4199 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
4200
4201 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
4202 return (0);
4203
4204 switch (errno) {
4205
4206 case EALREADY:
4207 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4208 "removal for this vdev is already in progress."));
4209 (void) zfs_error(hdl, EZFS_BUSY, errbuf);
4210 break;
4211
4212 case EINVAL:
4213 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4214 "invalid config; all top-level vdevs must "
4215 "have the same sector size and not be raidz."));
4216 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4217 break;
4218
4219 case EBUSY:
4220 if (islog) {
4221 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4222 "Mount encrypted datasets to replay logs."));
4223 } else {
4224 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4225 "Pool busy; removal may already be in progress"));
4226 }
4227 (void) zfs_error(hdl, EZFS_BUSY, errbuf);
4228 break;
4229
4230 case EACCES:
4231 if (islog) {
4232 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4233 "Mount encrypted datasets to replay logs."));
4234 (void) zfs_error(hdl, EZFS_BUSY, errbuf);
4235 } else {
4236 (void) zpool_standard_error(hdl, errno, errbuf);
4237 }
4238 break;
4239
4240 default:
4241 (void) zpool_standard_error(hdl, errno, errbuf);
4242 }
4243 return (-1);
4244 }
4245
4246 int
4247 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
4248 {
4249 zfs_cmd_t zc = {"\0"};
4250 char errbuf[ERRBUFLEN];
4251 libzfs_handle_t *hdl = zhp->zpool_hdl;
4252
4253 (void) snprintf(errbuf, sizeof (errbuf),
4254 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
4255
4256 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4257 zc.zc_cookie = 1;
4258
4259 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
4260 return (0);
4261
4262 return (zpool_standard_error(hdl, errno, errbuf));
4263 }
4264
4265 int
4266 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
4267 uint64_t *sizep)
4268 {
4269 char errbuf[ERRBUFLEN];
4270 nvlist_t *tgt;
4271 boolean_t avail_spare, l2cache, islog;
4272 libzfs_handle_t *hdl = zhp->zpool_hdl;
4273
4274 (void) snprintf(errbuf, sizeof (errbuf),
4275 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
4276 path);
4277
4278 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
4279 &islog)) == NULL)
4280 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4281
4282 if (avail_spare || l2cache || islog) {
4283 *sizep = 0;
4284 return (0);
4285 }
4286
4287 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
4288 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4289 "indirect size not available"));
4290 return (zfs_error(hdl, EINVAL, errbuf));
4291 }
4292 return (0);
4293 }
4294
4295 /*
4296 * Clear the errors for the pool, or the particular device if specified.
4297 */
4298 int
4299 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
4300 {
4301 zfs_cmd_t zc = {"\0"};
4302 char errbuf[ERRBUFLEN];
4303 nvlist_t *tgt;
4304 zpool_load_policy_t policy;
4305 boolean_t avail_spare, l2cache;
4306 libzfs_handle_t *hdl = zhp->zpool_hdl;
4307 nvlist_t *nvi = NULL;
4308 int error;
4309
4310 if (path)
4311 (void) snprintf(errbuf, sizeof (errbuf),
4312 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
4313 path);
4314 else
4315 (void) snprintf(errbuf, sizeof (errbuf),
4316 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
4317 zhp->zpool_name);
4318
4319 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4320 if (path) {
4321 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
4322 &l2cache, NULL)) == NULL)
4323 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4324
4325 /*
4326 * Don't allow error clearing for hot spares. Do allow
4327 * error clearing for l2cache devices.
4328 */
4329 if (avail_spare)
4330 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
4331
4332 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
4333 }
4334
4335 zpool_get_load_policy(rewindnvl, &policy);
4336 zc.zc_cookie = policy.zlp_rewind;
4337
4338 zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
4339 zcmd_write_src_nvlist(hdl, &zc, rewindnvl);
4340
4341 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
4342 errno == ENOMEM)
4343 zcmd_expand_dst_nvlist(hdl, &zc);
4344
4345 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
4346 errno != EPERM && errno != EACCES)) {
4347 if (policy.zlp_rewind &
4348 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
4349 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
4350 zpool_rewind_exclaim(hdl, zc.zc_name,
4351 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
4352 nvi);
4353 nvlist_free(nvi);
4354 }
4355 zcmd_free_nvlists(&zc);
4356 return (0);
4357 }
4358
4359 zcmd_free_nvlists(&zc);
4360 return (zpool_standard_error(hdl, errno, errbuf));
4361 }
4362
4363 /*
4364 * Similar to zpool_clear(), but takes a GUID (used by fmd).
4365 */
4366 int
4367 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
4368 {
4369 zfs_cmd_t zc = {"\0"};
4370 char errbuf[ERRBUFLEN];
4371 libzfs_handle_t *hdl = zhp->zpool_hdl;
4372
4373 (void) snprintf(errbuf, sizeof (errbuf),
4374 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
4375 (u_longlong_t)guid);
4376
4377 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4378 zc.zc_guid = guid;
4379 zc.zc_cookie = ZPOOL_NO_REWIND;
4380
4381 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
4382 return (0);
4383
4384 return (zpool_standard_error(hdl, errno, errbuf));
4385 }
4386
4387 /*
4388 * Change the GUID for a pool.
4389 *
4390 * Similar to zpool_reguid(), but may take a GUID.
4391 *
4392 * If the guid argument is NULL, then no GUID is passed in the nvlist to the
4393 * ioctl().
4394 */
4395 int
4396 zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid)
4397 {
4398 char errbuf[ERRBUFLEN];
4399 libzfs_handle_t *hdl = zhp->zpool_hdl;
4400 nvlist_t *nvl = NULL;
4401 zfs_cmd_t zc = {"\0"};
4402 int error;
4403
4404 if (guid != NULL) {
4405 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
4406 return (no_memory(hdl));
4407
4408 if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) {
4409 nvlist_free(nvl);
4410 return (no_memory(hdl));
4411 }
4412
4413 zcmd_write_src_nvlist(hdl, &zc, nvl);
4414 }
4415
4416 (void) snprintf(errbuf, sizeof (errbuf),
4417 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
4418
4419 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4420 error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc);
4421 if (error != 0)
4422 error = zpool_standard_error(hdl, errno, errbuf);
4423 /* free the request nvlists on both the success and error paths */
4424 if (guid != NULL) {
4425 zcmd_free_nvlists(&zc);
4426 nvlist_free(nvl);
4427 }
4428 return (error);
4429 }
4430
4431 /*
4432 * Change the GUID for a pool.
4433 */
4434 int
4435 zpool_reguid(zpool_handle_t *zhp)
4436 {
4437 return (zpool_set_guid(zhp, NULL));
4438 }
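
/*
 * Usage sketch: let the kernel pick a random GUID, or request a specific
 * (hypothetical) one.
 *
 *	(void) zpool_reguid(zhp);		// kernel chooses the GUID
 *
 *	uint64_t guid = 0x1234567890abcdefULL;	// illustrative value
 *	(void) zpool_set_guid(zhp, &guid);	// request this exact GUID
 */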
4439
4440 /*
4441 * Reopen the pool.
4442 */
4443 int
4444 zpool_reopen_one(zpool_handle_t *zhp, void *data)
4445 {
4446 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4447 const char *pool_name = zpool_get_name(zhp);
4448 boolean_t *scrub_restart = data;
4449 int error;
4450
4451 error = lzc_reopen(pool_name, *scrub_restart);
4452 if (error) {
4453 return (zpool_standard_error_fmt(hdl, error,
4454 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
4455 }
4456
4457 return (0);
4458 }
4459
4460 /* call into libzfs_core to execute the sync IOCTL per pool */
4461 int
4462 zpool_sync_one(zpool_handle_t *zhp, void *data)
4463 {
4464 int ret;
4465 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4466 const char *pool_name = zpool_get_name(zhp);
4467 boolean_t *force = data;
4468 nvlist_t *innvl = fnvlist_alloc();
4469
4470 fnvlist_add_boolean_value(innvl, "force", *force);
4471 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
4472 nvlist_free(innvl);
4473 return (zpool_standard_error_fmt(hdl, ret,
4474 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
4475 }
4476 nvlist_free(innvl);
4477
4478 return (0);
4479 }
4480
4481 #define PATH_BUF_LEN 64
4482
4483 /*
4484 * Given a vdev, return the name to display in iostat. If the vdev has a path,
4485 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
4486 * We also check if this is a whole disk, in which case we strip off the
4487 * trailing 's0' slice name.
4488 *
4489 * This routine is also responsible for identifying when disks have been
4490 * reconfigured in a new location. The kernel will have opened the device by
4491 * devid, but the path will still refer to the old location. To catch this, we
4492 * first do a path -> devid translation (which is fast for the common case). If
4493 * the devid matches, we're done. If not, we do a reverse devid -> path
4494 * translation and issue the appropriate ioctl() to update the path of the vdev.
4495 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
4496 * of these checks.
4497 */
4498 char *
4499 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
4500 int name_flags)
4501 {
4502 const char *type, *tpath;
4503 const char *path;
4504 uint64_t value;
4505 char buf[PATH_BUF_LEN];
4506 char tmpbuf[PATH_BUF_LEN * 2];
4507
4508 /*
4509 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
4510 * zpool name that will be displayed to the user.
4511 */
4512 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
4513 if (zhp != NULL && strcmp(type, "root") == 0)
4514 return (zfs_strdup(hdl, zpool_get_name(zhp)));
4515
4516 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
4517 name_flags |= VDEV_NAME_PATH;
4518 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
4519 name_flags |= VDEV_NAME_GUID;
4520 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
4521 name_flags |= VDEV_NAME_FOLLOW_LINKS;
4522
4523 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
4524 name_flags & VDEV_NAME_GUID) {
4525 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
4526 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
4527 path = buf;
4528 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
4529 path = tpath;
4530
4531 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
4532 char *rp = realpath(path, NULL);
4533 if (rp) {
4534 strlcpy(buf, rp, sizeof (buf));
4535 path = buf;
4536 free(rp);
4537 }
4538 }
4539
4540 /*
4541 * For a block device only use the name.
4542 */
4543 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
4544 !(name_flags & VDEV_NAME_PATH)) {
4545 path = zfs_strip_path(path);
4546 }
4547
4548 /*
4549 * Remove the partition from the path if this is a whole disk.
4550 */
4551 if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
4552 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
4553 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
4554 return (zfs_strip_partition(path));
4555 }
4556 } else {
4557 path = type;
4558
4559 /*
4560 * If it's a raidz device, we need to stick in the parity level.
4561 */
4562 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
4563 value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
4564 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
4565 (u_longlong_t)value);
4566 path = buf;
4567 }
4568
4569 /*
4570 * If it's a dRAID device, we add parity, groups, and spares.
4571 */
4572 if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
4573 uint64_t ndata, nparity, nspares;
4574 nvlist_t **child;
4575 uint_t children;
4576
4577 verify(nvlist_lookup_nvlist_array(nv,
4578 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
4579 nparity = fnvlist_lookup_uint64(nv,
4580 ZPOOL_CONFIG_NPARITY);
4581 ndata = fnvlist_lookup_uint64(nv,
4582 ZPOOL_CONFIG_DRAID_NDATA);
4583 nspares = fnvlist_lookup_uint64(nv,
4584 ZPOOL_CONFIG_DRAID_NSPARES);
4585
4586 path = zpool_draid_name(buf, sizeof (buf), ndata,
4587 nparity, nspares, children);
4588 }
4589
4590 /*
4591 * We identify each top-level vdev by using a <type-id>
4592 * naming convention.
4593 */
4594 if (name_flags & VDEV_NAME_TYPE_ID) {
4595 uint64_t id = fnvlist_lookup_uint64(nv,
4596 ZPOOL_CONFIG_ID);
4597 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
4598 path, (u_longlong_t)id);
4599 path = tmpbuf;
4600 }
4601 }
4602
4603 return (zfs_strdup(hdl, path));
4604 }
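
/*
 * Example (a sketch): the returned name is heap-allocated and must be
 * freed by the caller.
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nv, VDEV_NAME_TYPE_ID);
 *	(void) printf("%s\n", name);	// e.g. "raidz2-0" or "sda"
 *	free(name);
 */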
4605
4606 static int
4607 zbookmark_mem_compare(const void *a, const void *b)
4608 {
4609 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
4610 }
4611
4612 void
4613 zpool_add_propname(zpool_handle_t *zhp, const char *propname)
4614 {
4615 assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES);
4616 zhp->zpool_propnames[zhp->zpool_n_propnames] = propname;
4617 zhp->zpool_n_propnames++;
4618 }
4619
4620 /*
4621 * Retrieve the persistent error log, uniquify the members, and return to the
4622 * caller.
4623 */
4624 int
4625 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
4626 {
4627 zfs_cmd_t zc = {"\0"};
4628 libzfs_handle_t *hdl = zhp->zpool_hdl;
4629 zbookmark_phys_t *buf;
4630 uint64_t buflen = 10000; /* approx. 1MB of RAM */
4631
4632 if (fnvlist_lookup_uint64(zhp->zpool_config,
4633 ZPOOL_CONFIG_ERRCOUNT) == 0)
4634 return (0);
4635
4636 /*
4637 * Retrieve the raw error list from the kernel. If it doesn't fit,
4638 * allocate a larger buffer and retry.
4639 */
4640 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4641 for (;;) {
4642 buf = zfs_alloc(zhp->zpool_hdl,
4643 buflen * sizeof (zbookmark_phys_t));
4644 zc.zc_nvlist_dst = (uintptr_t)buf;
4645 zc.zc_nvlist_dst_size = buflen;
4646 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
4647 &zc) != 0) {
4648 free(buf);
4649 if (errno == ENOMEM) {
4650 buflen *= 2;
4651 } else {
4652 return (zpool_standard_error_fmt(hdl, errno,
4653 dgettext(TEXT_DOMAIN, "errors: List of "
4654 "errors unavailable")));
4655 }
4656 } else {
4657 break;
4658 }
4659 }
4660
4661 /*
4662 * Sort the resulting bookmarks. This is a little confusing due to the
4663 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
4664 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
4665 * _not_ copied as part of the process. So we point the start of our
4666 * array appropriately and decrement the total number of elements.
4667 */
4668 zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
4669 uint64_t zblen = buflen - zc.zc_nvlist_dst_size;
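
/*
 * For example, with buflen == 10 and zc_nvlist_dst_size == 4 (four
 * slots left unfilled at the front), the six valid bookmarks occupy
 * buf[4] through buf[9], so zb == buf + 4 and zblen == 6.
 */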
4670
4671 qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
4672
4673 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
4674
4675 /*
4676 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
4677 */
4678 for (uint64_t i = 0; i < zblen; i++) {
4679 nvlist_t *nv;
4680
4681 /* ignoring zb_blkid and zb_level for now */
4682 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
4683 zb[i-1].zb_object == zb[i].zb_object)
4684 continue;
4685
4686 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
4687 goto nomem;
4688 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
4689 zb[i].zb_objset) != 0) {
4690 nvlist_free(nv);
4691 goto nomem;
4692 }
4693 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
4694 zb[i].zb_object) != 0) {
4695 nvlist_free(nv);
4696 goto nomem;
4697 }
4698 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
4699 nvlist_free(nv);
4700 goto nomem;
4701 }
4702 nvlist_free(nv);
4703 }
4704
4705 free(buf);
4706 return (0);
4707
4708 nomem:
4709 free(buf);
4710 return (no_memory(zhp->zpool_hdl));
4711 }
4712
4713 /*
4714 * Upgrade a ZFS pool to the latest on-disk version.
4715 */
4716 int
4717 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4718 {
4719 zfs_cmd_t zc = {"\0"};
4720 libzfs_handle_t *hdl = zhp->zpool_hdl;
4721
4722 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4723 zc.zc_cookie = new_version;
4724
4725 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4726 return (zpool_standard_error_fmt(hdl, errno,
4727 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4728 zhp->zpool_name));
4729 return (0);
4730 }
4731
4732 void
4733 zfs_save_arguments(int argc, char **argv, char *string, int len)
4734 {
4735 int i;
4736
4737 (void) strlcpy(string, zfs_basename(argv[0]), len);
4738 for (i = 1; i < argc; i++) {
4739 (void) strlcat(string, " ", len);
4740 (void) strlcat(string, argv[i], len);
4741 }
4742 }
4743
4744 int
4745 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4746 {
4747 zfs_cmd_t zc = {"\0"};
4748 nvlist_t *args;
4749
4750 args = fnvlist_alloc();
4751 fnvlist_add_string(args, "message", message);
4752 zcmd_write_src_nvlist(hdl, &zc, args);
4753 int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4754 nvlist_free(args);
4755 zcmd_free_nvlists(&zc);
4756 return (err);
4757 }
4758
4759 /*
4760 * Perform ioctl to get some command history of a pool.
4761 *
4762 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
4763 * logical offset of the history buffer to start reading from.
4764 *
4765 * Upon return, 'off' is the next logical offset to read from and
4766 * 'len' is the actual amount of bytes read into 'buf'.
4767 */
4768 static int
4769 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
4770 {
4771 zfs_cmd_t zc = {"\0"};
4772 libzfs_handle_t *hdl = zhp->zpool_hdl;
4773
4774 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4775
4776 zc.zc_history = (uint64_t)(uintptr_t)buf;
4777 zc.zc_history_len = *len;
4778 zc.zc_history_offset = *off;
4779
4780 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
4781 switch (errno) {
4782 case EPERM:
4783 return (zfs_error_fmt(hdl, EZFS_PERM,
4784 dgettext(TEXT_DOMAIN,
4785 "cannot show history for pool '%s'"),
4786 zhp->zpool_name));
4787 case ENOENT:
4788 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
4789 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4790 "'%s'"), zhp->zpool_name));
4791 case ENOTSUP:
4792 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
4793 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4794 "'%s', pool must be upgraded"), zhp->zpool_name));
4795 default:
4796 return (zpool_standard_error_fmt(hdl, errno,
4797 dgettext(TEXT_DOMAIN,
4798 "cannot get history for '%s'"), zhp->zpool_name));
4799 }
4800 }
4801
4802 *len = zc.zc_history_len;
4803 *off = zc.zc_history_offset;
4804
4805 return (0);
4806 }
4807
4808 /*
4809 * Retrieve the command history of a pool.
4810 */
4811 int
4812 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
4813 boolean_t *eof)
4814 {
4815 libzfs_handle_t *hdl = zhp->zpool_hdl;
4816 char *buf;
4817 int buflen = 128 * 1024;
4818 nvlist_t **records = NULL;
4819 uint_t numrecords = 0;
4820 int err = 0, i;
4821 uint64_t start = *off;
4822
4823 buf = zfs_alloc(hdl, buflen);
4824
4825 /* process about 1MiB at a time */
4826 while (*off - start < 1024 * 1024) {
4827 uint64_t bytes_read = buflen;
4828 uint64_t leftover;
4829
4830 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
4831 break;
4832
4833 /* if nothing else was read in, we're at EOF, just return */
4834 if (!bytes_read) {
4835 *eof = B_TRUE;
4836 break;
4837 }
4838
4839 if ((err = zpool_history_unpack(buf, bytes_read,
4840 &leftover, &records, &numrecords)) != 0) {
4841 zpool_standard_error_fmt(hdl, err,
4842 dgettext(TEXT_DOMAIN,
4843 "cannot get history for '%s'"), zhp->zpool_name);
4844 break;
4845 }
4846 *off -= leftover;
4847 if (leftover == bytes_read) {
4848 /*
4849 * no progress made, because buffer is not big enough
4850 * to hold this record; resize and retry.
4851 */
4852 buflen *= 2;
4853 free(buf);
4854 buf = zfs_alloc(hdl, buflen);
4855 }
4856 }
4857
4858 free(buf);
4859
4860 if (!err) {
4861 *nvhisp = fnvlist_alloc();
4862 fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4863 (const nvlist_t **)records, numrecords);
4864 }
4865 for (i = 0; i < numrecords; i++)
4866 nvlist_free(records[i]);
4867 free(records);
4868
4869 return (err);
4870 }
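
/*
 * Illustrative caller loop (a sketch): page through the full history.
 *
 *	nvlist_t *nvhis;
 *	uint64_t off = 0;
 *	boolean_t eof = B_FALSE;
 *
 *	while (!eof && zpool_get_history(zhp, &nvhis, &off, &eof) == 0) {
 *		// ... consume the ZPOOL_HIST_RECORD array ...
 *		nvlist_free(nvhis);
 *	}
 */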
4871
4872 /*
4873 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4874 * If there is a new event available 'nvp' will contain a newly allocated
4875 * nvlist and 'dropped' will be set to the number of missed events since
4876 * the last call to this function. When 'nvp' is set to NULL it indicates
4877 * no new events are available. In either case the function returns 0 and
4878 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4879 * function will return a non-zero value. When the function is called in
4880 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4881 * it will not return until a new event is available.
4882 */
4883 int
4884 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4885 int *dropped, unsigned flags, int zevent_fd)
4886 {
4887 zfs_cmd_t zc = {"\0"};
4888 int error = 0;
4889
4890 *nvp = NULL;
4891 *dropped = 0;
4892 zc.zc_cleanup_fd = zevent_fd;
4893
4894 if (flags & ZEVENT_NONBLOCK)
4895 zc.zc_guid = ZEVENT_NONBLOCK;
4896
4897 zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);
4898
4899 retry:
4900 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4901 switch (errno) {
4902 case ESHUTDOWN:
4903 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4904 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4905 goto out;
4906 case ENOENT:
4907 /* Blocking error case should not occur */
4908 if (!(flags & ZEVENT_NONBLOCK))
4909 error = zpool_standard_error_fmt(hdl, errno,
4910 dgettext(TEXT_DOMAIN, "cannot get event"));
4911
4912 goto out;
4913 case ENOMEM:
4914 zcmd_expand_dst_nvlist(hdl, &zc);
4915 goto retry;
4916 default:
4917 error = zpool_standard_error_fmt(hdl, errno,
4918 dgettext(TEXT_DOMAIN, "cannot get event"));
4919 goto out;
4920 }
4921 }
4922
4923 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4924 if (error != 0)
4925 goto out;
4926
4927 *dropped = (int)zc.zc_cookie;
4928 out:
4929 zcmd_free_nvlists(&zc);
4930
4931 return (error);
4932 }
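
/*
 * Illustrative consumer loop (a sketch): drain all pending events without
 * blocking.  'zevent_fd' is assumed to be a descriptor on ZFS_DEV opened
 * by the caller.
 *
 *	nvlist_t *event;
 *	int dropped;
 *
 *	while (zpool_events_next(hdl, &event, &dropped,
 *	    ZEVENT_NONBLOCK, zevent_fd) == 0 && event != NULL) {
 *		if (dropped > 0)
 *			(void) printf("dropped %d events\n", dropped);
 *		nvlist_print(stdout, event);
 *		nvlist_free(event);
 *	}
 */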
4933
4934 /*
4935 * Clear all events.
4936 */
4937 int
4938 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4939 {
4940 zfs_cmd_t zc = {"\0"};
4941
4942 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4943 return (zpool_standard_error(hdl, errno,
4944 dgettext(TEXT_DOMAIN, "cannot clear events")));
4945
4946 if (count != NULL)
4947 *count = (int)zc.zc_cookie; /* # of events cleared */
4948
4949 return (0);
4950 }
4951
4952 /*
4953 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4954 * the passed zevent_fd file handle. On success zero is returned,
4955 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4956 */
4957 int
4958 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4959 {
4960 zfs_cmd_t zc = {"\0"};
4961 int error = 0;
4962
4963 zc.zc_guid = eid;
4964 zc.zc_cleanup_fd = zevent_fd;
4965
4966 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4967 switch (errno) {
4968 case ENOENT:
4969 error = zfs_error_fmt(hdl, EZFS_NOENT,
4970 dgettext(TEXT_DOMAIN, "cannot get event"));
4971 break;
4972
4973 case ENOMEM:
4974 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4975 dgettext(TEXT_DOMAIN, "cannot get event"));
4976 break;
4977
4978 default:
4979 error = zpool_standard_error_fmt(hdl, errno,
4980 dgettext(TEXT_DOMAIN, "cannot get event"));
4981 break;
4982 }
4983 }
4984
4985 return (error);
4986 }
4987
4988 static void
4989 zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4990 char *pathname, size_t len, boolean_t always_unmounted)
4991 {
4992 zfs_cmd_t zc = {"\0"};
4993 boolean_t mounted = B_FALSE;
4994 char *mntpnt = NULL;
4995 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4996
4997 if (dsobj == 0) {
4998 /* special case for the MOS */
4999 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
5000 (longlong_t)obj);
5001 return;
5002 }
5003
5004 /* get the dataset's name */
5005 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
5006 zc.zc_obj = dsobj;
5007 if (zfs_ioctl(zhp->zpool_hdl,
5008 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
5009 /* just write out a path of two object numbers */
5010 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
5011 (longlong_t)dsobj, (longlong_t)obj);
5012 return;
5013 }
5014 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
5015
5016 /* find out if the dataset is mounted */
5017 mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
5018 &mntpnt);
5019
5020 /* get the corrupted object's path */
5021 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
5022 zc.zc_obj = obj;
5023 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
5024 &zc) == 0) {
5025 if (mounted) {
5026 (void) snprintf(pathname, len, "%s%s", mntpnt,
5027 zc.zc_value);
5028 } else {
5029 (void) snprintf(pathname, len, "%s:%s",
5030 dsname, zc.zc_value);
5031 }
5032 } else {
5033 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
5034 (longlong_t)obj);
5035 }
5036 free(mntpnt);
5037 }
5038
5039 void
5040 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
5041 char *pathname, size_t len)
5042 {
5043 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
5044 }
5045
5046 void
5047 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
5048 char *pathname, size_t len)
5049 {
5050 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
5051 }

5052 /*
5053 * Wait while the specified activity is in progress in the pool.
5054 */
5055 int
5056 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
5057 {
5058 boolean_t missing;
5059
5060 int error = zpool_wait_status(zhp, activity, &missing, NULL);
5061
5062 if (missing) {
5063 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
5064 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
5065 zhp->zpool_name);
5066 return (ENOENT);
5067 } else {
5068 return (error);
5069 }
5070 }
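
/*
 * Usage sketch: block until any in-progress scrub finishes.
 *
 *	int err = zpool_wait(zhp, ZPOOL_WAIT_SCRUB);
 */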
5071
5072 /*
5073 * Wait for the given activity and return the status of the wait (whether or not
5074 * any waiting was done) in the 'waited' parameter. Non-existent pools are
5075 * reported via the 'missing' parameter, rather than by printing an error
5076 * message. This is convenient when this function is called in a loop over a
5077 * long period of time (as it is, for example, by zpool's wait cmd). In that
5078 * scenario, a pool being exported or destroyed should be considered a normal
5079 * event, so we don't want to print an error when we find that the pool doesn't
5080 * exist.
5081 */
5082 int
5083 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
5084 boolean_t *missing, boolean_t *waited)
5085 {
5086 int error = lzc_wait(zhp->zpool_name, activity, waited);
5087 *missing = (error == ENOENT);
5088 if (*missing)
5089 return (0);
5090
5091 if (error != 0) {
5092 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5093 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
5094 zhp->zpool_name);
5095 }
5096
5097 return (error);
5098 }
5099
5100 int
5101 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
5102 {
5103 int error = lzc_set_bootenv(zhp->zpool_name, envmap);
5104 if (error != 0) {
5105 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5106 dgettext(TEXT_DOMAIN,
5107 "error setting bootenv in pool '%s'"), zhp->zpool_name);
5108 }
5109
5110 return (error);
5111 }
5112
5113 int
5114 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
5115 {
5116 nvlist_t *nvl;
5117 int error;
5118
5119 nvl = NULL;
5120 error = lzc_get_bootenv(zhp->zpool_name, &nvl);
5121 if (error != 0) {
5122 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5123 dgettext(TEXT_DOMAIN,
5124 "error getting bootenv in pool '%s'"), zhp->zpool_name);
5125 } else {
5126 *nvlp = nvl;
5127 }
5128
5129 return (error);
5130 }
5131
5132 /*
5133 * Attempt to read and parse feature file(s) (from "compatibility" property).
5134 * Files contain zpool feature names, comma or whitespace-separated.
5135 * Comments (# character to next newline) are discarded.
5136 *
5137 * Arguments:
5138 * compatibility : string containing feature filenames
5139 * features : either NULL or pointer to array of boolean
5140 * report : either NULL or pointer to string buffer
5141 * rlen : length of "report" buffer
5142 *
5143 * compatibility is NULL (unset), "", "off", "legacy", or list of
5144 * comma-separated filenames. Filenames should either be absolute,
5145 * or relative to:
5146 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
5147 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
5148 * (Unset), "" or "off" => enable all features
5149 * "legacy" => disable all features
5150 *
5151 * Any feature names read from files which match unames in spa_feature_table
5152 * will have the corresponding boolean set in the features array (if non-NULL).
5153 * If more than one feature set specified, only features present in *all* of
5154 * them will be set.
5155 *
5156 * "report" if not NULL will be populated with a suitable status message.
5157 *
5158 * Return values:
5159 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok
5160 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
5161 * ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
5162 * ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
5163 * ZPOOL_COMPATIBILITY_NOFILES : no feature files found
5164 */
5165 zpool_compat_status_t
5166 zpool_load_compat(const char *compat, boolean_t *features, char *report,
5167 size_t rlen)
5168 {
5169 int sdirfd, ddirfd, featfd;
5170 struct stat fs;
5171 char *fc;
5172 char *ps, *ls, *ws;
5173 char *file, *line, *word;
5174
5175 char l_compat[ZFS_MAXPROPLEN];
5176
5177 boolean_t ret_nofiles = B_TRUE;
5178 boolean_t ret_badfile = B_FALSE;
5179 boolean_t ret_badtoken = B_FALSE;
5180 boolean_t ret_warntoken = B_FALSE;
5181
5182 /* special cases (unset), "" and "off" => enable all features */
5183 if (compat == NULL || compat[0] == '\0' ||
5184 strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
5185 if (features != NULL) {
5186 for (uint_t i = 0; i < SPA_FEATURES; i++)
5187 features[i] = B_TRUE;
5188 }
5189 if (report != NULL)
5190 strlcpy(report, gettext("all features enabled"), rlen);
5191 return (ZPOOL_COMPATIBILITY_OK);
5192 }
5193
5194 /* Final special case "legacy" => disable all features */
5195 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
5196 if (features != NULL)
5197 for (uint_t i = 0; i < SPA_FEATURES; i++)
5198 features[i] = B_FALSE;
5199 if (report != NULL)
5200 strlcpy(report, gettext("all features disabled"), rlen);
5201 return (ZPOOL_COMPATIBILITY_OK);
5202 }
5203
5204 /*
5205 * Start with all true; will be ANDed with results from each file
5206 */
5207 if (features != NULL)
5208 for (uint_t i = 0; i < SPA_FEATURES; i++)
5209 features[i] = B_TRUE;
5210
5211 char err_badfile[ZFS_MAXPROPLEN] = "";
5212 char err_badtoken[ZFS_MAXPROPLEN] = "";
5213
5214 /*
5215 * We ignore errors from the directory open()
5216 * as they're only needed if the filename is relative
5217 * which will be checked during the openat().
5218 */
5219
5220 /* O_PATH safer than O_RDONLY if system allows it */
5221 #if defined(O_PATH)
5222 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
5223 #else
5224 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
5225 #endif
5226
5227 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
5228 ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
5229
5230 (void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
5231
5232 for (file = strtok_r(l_compat, ",", &ps);
5233 file != NULL;
5234 file = strtok_r(NULL, ",", &ps)) {
5235
5236 boolean_t l_features[SPA_FEATURES];
5237
5238 enum { Z_SYSCONF, Z_DATA } source;
5239
5240 /* try sysconfdir first, then datadir */
5241 source = Z_SYSCONF;
5242 if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
5243 featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
5244 source = Z_DATA;
5245 }
5246
5247 /* File readable and correct size? */
5248 if (featfd < 0 ||
5249 fstat(featfd, &fs) < 0 ||
5250 fs.st_size < 1 ||
5251 fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
5252 (void) close(featfd);
5253 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
5254 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
5255 ret_badfile = B_TRUE;
5256 continue;
5257 }
5258
5259 /* Prefault the file if system allows */
5260 #if defined(MAP_POPULATE)
5261 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
5262 #elif defined(MAP_PREFAULT_READ)
5263 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
5264 #else
5265 #define ZC_MMAP_FLAGS (MAP_PRIVATE)
5266 #endif
5267
5268 /* private mmap() so we can strtok safely */
5269 fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
5270 ZC_MMAP_FLAGS, featfd, 0);
5271 (void) close(featfd);
5272
5273 /* map ok, and last character == newline? */
5274 if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
5275 (void) munmap((void *) fc, fs.st_size);
5276 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
5277 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
5278 ret_badfile = B_TRUE;
5279 continue;
5280 }
5281
5282 ret_nofiles = B_FALSE;
5283
5284 for (uint_t i = 0; i < SPA_FEATURES; i++)
5285 l_features[i] = B_FALSE;
5286
5287 /* replace final newline with NUL to terminate the string */
5288 fc[fs.st_size - 1] = '\0';
5289
5290 for (line = strtok_r(fc, "\n", &ls);
5291 line != NULL;
5292 line = strtok_r(NULL, "\n", &ls)) {
5293 /* discard comments */
5294 char *r = strchr(line, '#');
5295 if (r != NULL)
5296 *r = '\0';
5297
5298 for (word = strtok_r(line, ", \t", &ws);
5299 word != NULL;
5300 word = strtok_r(NULL, ", \t", &ws)) {
5301 /* Find matching feature name */
5302 uint_t f;
5303 for (f = 0; f < SPA_FEATURES; f++) {
5304 zfeature_info_t *fi =
5305 &spa_feature_table[f];
5306 if (strcmp(word, fi->fi_uname) == 0) {
5307 l_features[f] = B_TRUE;
5308 break;
5309 }
5310 }
5311 if (f < SPA_FEATURES)
5312 continue;
5313
5314 /* found an unrecognized word */
5315 /* lightly sanitize it */
5316 if (strlen(word) > 32)
5317 word[32] = '\0';
5318 for (char *c = word; *c != '\0'; c++)
5319 if (!isprint(*c))
5320 *c = '?';
5321
5322 strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
5323 strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
5324 if (source == Z_SYSCONF)
5325 ret_badtoken = B_TRUE;
5326 else
5327 ret_warntoken = B_TRUE;
5328 }
5329 }
5330 (void) munmap((void *) fc, fs.st_size);
5331
5332 if (features != NULL)
5333 for (uint_t i = 0; i < SPA_FEATURES; i++)
5334 features[i] &= l_features[i];
5335 }
5336 (void) close(sdirfd);
5337 (void) close(ddirfd);
5338
5339 /* Return the most serious error */
5340 if (ret_badfile) {
5341 if (report != NULL)
5342 snprintf(report, rlen, gettext("could not read/"
5343 "parse feature file(s): %s"), err_badfile);
5344 return (ZPOOL_COMPATIBILITY_BADFILE);
5345 }
5346 if (ret_nofiles) {
5347 if (report != NULL)
5348 strlcpy(report,
5349 gettext("no valid compatibility files specified"),
5350 rlen);
5351 return (ZPOOL_COMPATIBILITY_NOFILES);
5352 }
5353 if (ret_badtoken) {
5354 if (report != NULL)
5355 snprintf(report, rlen, gettext("invalid feature "
5356 "name(s) in local compatibility files: %s"),
5357 err_badtoken);
5358 return (ZPOOL_COMPATIBILITY_BADTOKEN);
5359 }
5360 if (ret_warntoken) {
5361 if (report != NULL)
5362 snprintf(report, rlen, gettext("unrecognized feature "
5363 "name(s) in distribution compatibility files: %s"),
5364 err_badtoken);
5365 return (ZPOOL_COMPATIBILITY_WARNTOKEN);
5366 }
5367 if (report != NULL)
5368 strlcpy(report, gettext("compatibility set ok"), rlen);
5369 return (ZPOOL_COMPATIBILITY_OK);
5370 }
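
/*
 * Example (illustrative): a hypothetical compatibility file,
 * /etc/zfs/compatibility.d/myhosts, containing
 *
 *	# features supported by every host in the fleet
 *	async_destroy, lz4_compress
 *	spacemap_v2
 *
 * could be checked with:
 *
 *	boolean_t features[SPA_FEATURES];
 *	char report[1024];
 *
 *	if (zpool_load_compat("myhosts", features, report,
 *	    sizeof (report)) != ZPOOL_COMPATIBILITY_OK)
 *		(void) fprintf(stderr, "%s\n", report);
 */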
5371
5372 static int
5373 zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
5374 {
5375 nvlist_t *tgt;
5376 boolean_t avail_spare, l2cache;
5377
5378 verify(zhp != NULL);
5379 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
5380 char errbuf[ERRBUFLEN];
5381 (void) snprintf(errbuf, sizeof (errbuf),
5382 dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
5383 return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
5384 }
5385
5386 if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
5387 NULL)) == NULL) {
5388 char errbuf[ERRBUFLEN];
5389 (void) snprintf(errbuf, sizeof (errbuf),
5390 dgettext(TEXT_DOMAIN, "cannot find %s in %s"),
5391 vdevname, zhp->zpool_name);
5392 return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
5393 }
5394
5395 *vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
5396 return (0);
5397 }
5398
5399 /*
5400 * Get a vdev property value for 'prop' and return the value in
5401 * a pre-allocated buffer.
5402 */
5403 int
5404 zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
5405 char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
5406 {
5407 nvlist_t *nv;
5408 const char *strval;
5409 uint64_t intval;
5410 zprop_source_t src = ZPROP_SRC_NONE;
5411
5412 if (prop == VDEV_PROP_USERPROP) {
5413 /* user property, prop_name must contain the property name */
5414 assert(prop_name != NULL);
5415 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5416 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5417 strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
5418 } else {
5419 /* user prop not found */
5420 src = ZPROP_SRC_DEFAULT;
5421 strval = "-";
5422 }
5423 (void) strlcpy(buf, strval, len);
5424 if (srctype)
5425 *srctype = src;
5426 return (0);
5427 }
5428
5429 if (prop_name == NULL)
5430 prop_name = (char *)vdev_prop_to_name(prop);
5431
5432 switch (vdev_prop_get_type(prop)) {
5433 case PROP_TYPE_STRING:
5434 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5435 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5436 strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
5437 } else {
5438 src = ZPROP_SRC_DEFAULT;
5439 if ((strval = vdev_prop_default_string(prop)) == NULL)
5440 strval = "-";
5441 }
5442 (void) strlcpy(buf, strval, len);
5443 break;
5444
5445 case PROP_TYPE_NUMBER:
5446 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5447 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5448 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
5449 } else {
5450 src = ZPROP_SRC_DEFAULT;
5451 intval = vdev_prop_default_numeric(prop);
5452 }
5453
5454 switch (prop) {
5455 case VDEV_PROP_ASIZE:
5456 case VDEV_PROP_PSIZE:
5457 case VDEV_PROP_SIZE:
5458 case VDEV_PROP_BOOTSIZE:
5459 case VDEV_PROP_ALLOCATED:
5460 case VDEV_PROP_FREE:
5461 case VDEV_PROP_READ_ERRORS:
5462 case VDEV_PROP_WRITE_ERRORS:
5463 case VDEV_PROP_CHECKSUM_ERRORS:
5464 case VDEV_PROP_INITIALIZE_ERRORS:
5465 case VDEV_PROP_TRIM_ERRORS:
5466 case VDEV_PROP_SLOW_IOS:
5467 case VDEV_PROP_OPS_NULL:
5468 case VDEV_PROP_OPS_READ:
5469 case VDEV_PROP_OPS_WRITE:
5470 case VDEV_PROP_OPS_FREE:
5471 case VDEV_PROP_OPS_CLAIM:
5472 case VDEV_PROP_OPS_TRIM:
5473 case VDEV_PROP_BYTES_NULL:
5474 case VDEV_PROP_BYTES_READ:
5475 case VDEV_PROP_BYTES_WRITE:
5476 case VDEV_PROP_BYTES_FREE:
5477 case VDEV_PROP_BYTES_CLAIM:
5478 case VDEV_PROP_BYTES_TRIM:
5479 if (literal) {
5480 (void) snprintf(buf, len, "%llu",
5481 (u_longlong_t)intval);
5482 } else {
5483 (void) zfs_nicenum(intval, buf, len);
5484 }
5485 break;
5486 case VDEV_PROP_EXPANDSZ:
5487 if (intval == 0) {
5488 (void) strlcpy(buf, "-", len);
5489 } else if (literal) {
5490 (void) snprintf(buf, len, "%llu",
5491 (u_longlong_t)intval);
5492 } else {
5493 (void) zfs_nicenum(intval, buf, len);
5494 }
5495 break;
5496 case VDEV_PROP_CAPACITY:
5497 if (literal) {
5498 (void) snprintf(buf, len, "%llu",
5499 (u_longlong_t)intval);
5500 } else {
5501 (void) snprintf(buf, len, "%llu%%",
5502 (u_longlong_t)intval);
5503 }
5504 break;
5505 case VDEV_PROP_CHECKSUM_N:
5506 case VDEV_PROP_CHECKSUM_T:
5507 case VDEV_PROP_IO_N:
5508 case VDEV_PROP_IO_T:
5509 case VDEV_PROP_SLOW_IO_N:
5510 case VDEV_PROP_SLOW_IO_T:
5511 if (intval == UINT64_MAX) {
5512 (void) strlcpy(buf, "-", len);
5513 } else {
5514 (void) snprintf(buf, len, "%llu",
5515 (u_longlong_t)intval);
5516 }
5517 break;
5518 case VDEV_PROP_FRAGMENTATION:
5519 if (intval == UINT64_MAX) {
5520 (void) strlcpy(buf, "-", len);
5521 } else {
5522 (void) snprintf(buf, len, "%llu%%",
5523 (u_longlong_t)intval);
5524 }
5525 break;
5526 case VDEV_PROP_STATE:
5527 if (literal) {
5528 (void) snprintf(buf, len, "%llu",
5529 (u_longlong_t)intval);
5530 } else {
5531 (void) strlcpy(buf, zpool_state_to_name(intval,
5532 VDEV_AUX_NONE), len);
5533 }
5534 break;
5535 default:
5536 (void) snprintf(buf, len, "%llu",
5537 (u_longlong_t)intval);
5538 }
5539 break;
5540
5541 case PROP_TYPE_INDEX:
5542 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5543 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5544 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
5545 } else {
5546 /* 'trim_support' only valid for leaf vdevs */
5547 if (prop == VDEV_PROP_TRIM_SUPPORT) {
5548 (void) strlcpy(buf, "-", len);
5549 break;
5550 }
5551 src = ZPROP_SRC_DEFAULT;
5552 intval = vdev_prop_default_numeric(prop);
5553 /* Only use if provided by the RAIDZ VDEV above */
5554 if (prop == VDEV_PROP_RAIDZ_EXPANDING)
5555 return (ENOENT);
5556 if (prop == VDEV_PROP_SIT_OUT)
5557 return (ENOENT);
5558 }
5559 if (vdev_prop_index_to_string(prop, intval,
5560 (const char **)&strval) != 0)
5561 return (-1);
5562 (void) strlcpy(buf, strval, len);
5563 break;
5564
5565 default:
5566 abort();
5567 }
5568
5569 if (srctype)
5570 *srctype = src;
5571
5572 return (0);
5573 }
5574
5575 /*
5576 * Get a vdev property value for 'prop_name' and return the value in
5577 * a pre-allocated buffer.
5578 */
5579 int
5580 zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
5581 char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
5582 boolean_t literal)
5583 {
5584 nvlist_t *reqnvl, *reqprops;
5585 nvlist_t *retprops = NULL;
5586 uint64_t vdev_guid = 0;
5587 int ret;
5588
5589 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5590 return (ret);
5591
5592 if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
5593 return (no_memory(zhp->zpool_hdl));
5594 if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0) {
nvlist_free(reqnvl);	/* don't leak the outer request nvlist */
5595 return (no_memory(zhp->zpool_hdl));
}
5596
5597 fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
5598
5599 if (prop != VDEV_PROP_USERPROP) {
5600 /* prop_name overrides prop value */
5601 if (prop_name != NULL)
5602 prop = vdev_name_to_prop(prop_name);
5603 else
5604 prop_name = (char *)vdev_prop_to_name(prop);
5605 assert(prop < VDEV_NUM_PROPS);
5606 }
5607
5608 assert(prop_name != NULL);
5609 if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
5610 nvlist_free(reqnvl);
5611 nvlist_free(reqprops);
5612 return (no_memory(zhp->zpool_hdl));
5613 }
5614
5615 fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
5616
5617 ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
5618
5619 if (ret == 0) {
5620 ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
5621 len, srctype, literal);
5622 } else {
5623 char errbuf[ERRBUFLEN];
5624 (void) snprintf(errbuf, sizeof (errbuf),
5625 dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
5626 " %s in %s"), prop_name, vdevname, zhp->zpool_name);
5627 (void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
5628 }
5629
5630 nvlist_free(reqnvl);
5631 nvlist_free(reqprops);
5632 nvlist_free(retprops);
5633
5634 return (ret);
5635 }
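
#if 0
/*
 * Illustrative sketch, not part of the library: how a caller might read a
 * single vdev property through zpool_get_vdev_prop().  The pool name
 * ("tank") and vdev name ("/dev/sda1") are assumptions for the example
 * only.  Passing a NULL prop_name lets the vdev_prop_t value select the
 * property; the result is rendered into the caller-supplied buffer.
 */
static void
example_print_vdev_io_n(libzfs_handle_t *g_zfs)
{
	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");
	char buf[ZFS_MAXPROPLEN];
	zprop_source_t src;

	if (zhp == NULL)
		return;
	if (zpool_get_vdev_prop(zhp, "/dev/sda1", VDEV_PROP_IO_N, NULL,
	    buf, sizeof (buf), &src, B_FALSE) == 0)
		(void) printf("io_n = %s\n", buf);
	zpool_close(zhp);
}
#endif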

/*
 * Get all vdev properties
 */
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
    nvlist_t **outnvl)
{
	nvlist_t *nvl = NULL;
	uint64_t vdev_guid = 0;
	int ret;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);

	ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);

	nvlist_free(nvl);

	if (ret) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
		    " %s in %s"), vdevname, zhp->zpool_name);
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	}

	return (ret);
}
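
#if 0
/*
 * Illustrative sketch, not part of the library: dumping every property
 * name returned by zpool_get_all_vdev_props().  The returned nvlist maps
 * property names to per-property value nvlists and is owned by the
 * caller, so it must be freed.  The vdev name is an assumption for the
 * example only.
 */
static void
example_dump_vdev_props(zpool_handle_t *zhp)
{
	nvlist_t *props = NULL;

	if (zpool_get_all_vdev_props(zhp, "/dev/sda1", &props) != 0)
		return;
	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(props, elem))
		(void) printf("%s\n", nvpair_name(elem));
	nvlist_free(props);
}
#endif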

/*
 * Set vdev property
 */
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
    const char *propname, const char *propval)
{
	int ret;
	nvlist_t *nvl = NULL;
	nvlist_t *outnvl = NULL;
	nvlist_t *props;
	nvlist_t *realprops;
	prop_flags_t flags = { 0 };
	uint64_t version;
	uint64_t vdev_guid;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));
	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);

	if (nvlist_add_string(props, propname, propval) != 0) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	char errbuf[ERRBUFLEN];
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
	    propname, vdevname, zhp->zpool_name);

	flags.vdevprop = 1;
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(props);
	props = realprops;

	fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);

	ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);

	nvlist_free(props);
	nvlist_free(nvl);
	nvlist_free(outnvl);

	if (ret) {
		if (errno == ENOTSUP) {
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "property not supported for this vdev"));
			(void) zfs_error(zhp->zpool_hdl, EZFS_PROPTYPE, errbuf);
		} else {
			(void) zpool_standard_error(zhp->zpool_hdl, errno,
			    errbuf);
		}
	}

	return (ret);
}
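
#if 0
/*
 * Illustrative sketch, not part of the library: setting a vdev property.
 * Values are always passed as strings; zpool_valid_proplist() converts
 * and validates them before the ioctl is issued.  The property/value
 * pair here mirrors `zpool set io_n=10 tank /dev/sda1` and the vdev name
 * is an assumption for the example only.
 */
static void
example_set_vdev_io_n(zpool_handle_t *zhp)
{
	if (zpool_set_vdev_prop(zhp, "/dev/sda1", "io_n", "10") != 0)
		(void) fprintf(stderr, "failed to set io_n\n");
}
#endif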

/*
 * Prune older entries from the DDT to reclaim space under the quota
 */
int
zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit,
    uint64_t amount)
{
	int error = lzc_ddt_prune(zhp->zpool_name, unit, amount);
	if (error != 0) {
		libzfs_handle_t *hdl = zhp->zpool_hdl;
		char errbuf[ERRBUFLEN];

		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot prune dedup table on '%s'"), zhp->zpool_name);

		if (error == EALREADY) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "a prune operation is already in progress"));
			(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		return (-1);
	}

	return (0);
}
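
#if 0
/*
 * Illustrative sketch, not part of the library: pruning the oldest 10%
 * of single-reference dedup table entries.  This assumes the
 * ZPOOL_DDT_PRUNE_PERCENTAGE unit from the zpool_ddt_prune_unit_t enum
 * (the same unit `zpool ddtprune -p` uses); the pool handle is supplied
 * by the caller.
 */
static void
example_ddt_prune_percent(zpool_handle_t *zhp)
{
	if (zpool_ddt_prune(zhp, ZPOOL_DDT_PRUNE_PERCENTAGE, 10) != 0)
		(void) fprintf(stderr, "ddt prune failed\n");
}
#endif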