1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22
23 /*
24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
28 * Copyright (c) 2018 Datto Inc.
29 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
30 * Copyright (c) 2017, Intel Corporation.
31 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
32 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
33 * Copyright (c) 2021, 2023, Klara Inc.
34 * Copyright (c) 2025 Hewlett Packard Enterprise Development LP.
35 */
36
37 #include <errno.h>
38 #include <libintl.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <strings.h>
42 #include <unistd.h>
43 #include <libgen.h>
44 #include <zone.h>
45 #include <sys/stat.h>
46 #include <sys/efi_partition.h>
47 #include <sys/systeminfo.h>
48 #include <sys/zfs_ioctl.h>
49 #include <sys/zfs_sysfs.h>
50 #include <sys/vdev_disk.h>
51 #include <sys/types.h>
52 #include <dlfcn.h>
53 #include <libzutil.h>
54 #include <fcntl.h>
55
56 #include "zfs_namecheck.h"
57 #include "zfs_prop.h"
58 #include "libzfs_impl.h"
59 #include "zfs_comutil.h"
60 #include "zfeature_common.h"
61
62 static boolean_t zpool_vdev_is_interior(const char *name);
63
64 typedef struct prop_flags {
65 unsigned int create:1; /* Validate property on creation */
66 unsigned int import:1; /* Validate property on import */
67 unsigned int vdevprop:1; /* Validate property as a VDEV property */
68 } prop_flags_t;
69
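/*
 * Illustrative note (not part of the original source): callers typically
 * initialize these flags with a designated initializer, e.g.
 *
 *	prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
 *
 * before passing them to zpool_valid_proplist() below.
 */
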
70 /*
71 * ====================================================================
72 * zpool property functions
73 * ====================================================================
74 */
75
76 static int
77 zpool_get_all_props(zpool_handle_t *zhp)
78 {
79 zfs_cmd_t zc = {"\0"};
80 libzfs_handle_t *hdl = zhp->zpool_hdl;
81
82 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
83
84 if (zhp->zpool_n_propnames > 0) {
85 nvlist_t *innvl = fnvlist_alloc();
86 fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
87 zhp->zpool_propnames, zhp->zpool_n_propnames);
88 zcmd_write_src_nvlist(hdl, &zc, innvl);
89 fnvlist_free(innvl);
90 }
91
92 zcmd_alloc_dst_nvlist(hdl, &zc, 0);
93
94 while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
95 if (errno == ENOMEM)
96 zcmd_expand_dst_nvlist(hdl, &zc);
97 else {
98 zcmd_free_nvlists(&zc);
99 return (-1);
100 }
101 }
102
103 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
104 zcmd_free_nvlists(&zc);
105 return (-1);
106 }
107
108 zcmd_free_nvlists(&zc);
109
110 return (0);
111 }
112
113 int
114 zpool_props_refresh(zpool_handle_t *zhp)
115 {
116 nvlist_t *old_props;
117
118 old_props = zhp->zpool_props;
119
120 if (zpool_get_all_props(zhp) != 0)
121 return (-1);
122
123 nvlist_free(old_props);
124 return (0);
125 }
126
127 static const char *
128 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
129 zprop_source_t *src)
130 {
131 nvlist_t *nv, *nvl;
132 const char *value;
133 zprop_source_t source;
134
135 nvl = zhp->zpool_props;
136 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
137 source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
138 value = fnvlist_lookup_string(nv, ZPROP_VALUE);
139 } else {
140 source = ZPROP_SRC_DEFAULT;
141 if ((value = zpool_prop_default_string(prop)) == NULL)
142 value = "-";
143 }
144
145 if (src)
146 *src = source;
147
148 return (value);
149 }
150
151 uint64_t
152 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
153 {
154 nvlist_t *nv, *nvl;
155 uint64_t value;
156 zprop_source_t source;
157
158 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
159 /*
160 * zpool_get_all_props() has most likely failed because
161 * the pool is faulted, but if all we need is the top level
162 * vdev's guid then get it from the zhp config nvlist.
163 */
164 if ((prop == ZPOOL_PROP_GUID) &&
165 (nvlist_lookup_nvlist(zhp->zpool_config,
166 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
167 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
168 == 0)) {
169 return (value);
170 }
171 return (zpool_prop_default_numeric(prop));
172 }
173
174 nvl = zhp->zpool_props;
175 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
176 source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
177 value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
178 } else {
179 source = ZPROP_SRC_DEFAULT;
180 value = zpool_prop_default_numeric(prop);
181 }
182
183 if (src)
184 *src = source;
185
186 return (value);
187 }
188
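/*
 * Illustrative sketch (not part of the original source): a caller holding
 * an open handle 'zhp' might read a numeric property like this:
 *
 *	zprop_source_t src;
 *	uint64_t vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *
 * 'src' then reports whether the value was set locally or is a default.
 */
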
189 /*
190 * Map VDEV STATE to printed strings.
191 */
192 const char *
193 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
194 {
195 switch (state) {
196 case VDEV_STATE_CLOSED:
197 case VDEV_STATE_OFFLINE:
198 return (gettext("OFFLINE"));
199 case VDEV_STATE_REMOVED:
200 return (gettext("REMOVED"));
201 case VDEV_STATE_CANT_OPEN:
202 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
203 return (gettext("FAULTED"));
204 else if (aux == VDEV_AUX_SPLIT_POOL)
205 return (gettext("SPLIT"));
206 else
207 return (gettext("UNAVAIL"));
208 case VDEV_STATE_FAULTED:
209 return (gettext("FAULTED"));
210 case VDEV_STATE_DEGRADED:
211 return (gettext("DEGRADED"));
212 case VDEV_STATE_HEALTHY:
213 return (gettext("ONLINE"));
214
215 default:
216 break;
217 }
218
219 return (gettext("UNKNOWN"));
220 }
221
222 /*
223 * Map POOL STATE to printed strings.
224 */
225 const char *
226 zpool_pool_state_to_name(pool_state_t state)
227 {
228 switch (state) {
229 default:
230 break;
231 case POOL_STATE_ACTIVE:
232 return (gettext("ACTIVE"));
233 case POOL_STATE_EXPORTED:
234 return (gettext("EXPORTED"));
235 case POOL_STATE_DESTROYED:
236 return (gettext("DESTROYED"));
237 case POOL_STATE_SPARE:
238 return (gettext("SPARE"));
239 case POOL_STATE_L2CACHE:
240 return (gettext("L2CACHE"));
241 case POOL_STATE_UNINITIALIZED:
242 return (gettext("UNINITIALIZED"));
243 case POOL_STATE_UNAVAIL:
244 return (gettext("UNAVAIL"));
245 case POOL_STATE_POTENTIALLY_ACTIVE:
246 return (gettext("POTENTIALLY_ACTIVE"));
247 }
248
249 return (gettext("UNKNOWN"));
250 }
251
252 /*
253 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
254 * "SUSPENDED", etc).
255 */
256 const char *
257 zpool_get_state_str(zpool_handle_t *zhp)
258 {
259 zpool_errata_t errata;
260 zpool_status_t status;
261 const char *str;
262
263 status = zpool_get_status(zhp, NULL, &errata);
264
265 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
266 str = gettext("FAULTED");
267 } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
268 status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
269 status == ZPOOL_STATUS_IO_FAILURE_MMP) {
270 str = gettext("SUSPENDED");
271 } else {
272 nvlist_t *nvroot = fnvlist_lookup_nvlist(
273 zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
274 uint_t vsc;
275 vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
276 nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
277 str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
278 }
279 return (str);
280 }
281
282 /*
283 * Get a zpool property value for 'prop' and return the value in
284 * a pre-allocated buffer.
285 */
286 int
287 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
288 size_t len, zprop_source_t *srctype, boolean_t literal)
289 {
290 uint64_t intval;
291 const char *strval;
292 zprop_source_t src = ZPROP_SRC_NONE;
293
294 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
295 switch (prop) {
296 case ZPOOL_PROP_NAME:
297 (void) strlcpy(buf, zpool_get_name(zhp), len);
298 break;
299
300 case ZPOOL_PROP_HEALTH:
301 (void) strlcpy(buf, zpool_get_state_str(zhp), len);
302 break;
303
304 case ZPOOL_PROP_GUID:
305 intval = zpool_get_prop_int(zhp, prop, &src);
306 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
307 break;
308
309 case ZPOOL_PROP_ALTROOT:
310 case ZPOOL_PROP_CACHEFILE:
311 case ZPOOL_PROP_COMMENT:
312 case ZPOOL_PROP_COMPATIBILITY:
313 if (zhp->zpool_props != NULL ||
314 zpool_get_all_props(zhp) == 0) {
315 (void) strlcpy(buf,
316 zpool_get_prop_string(zhp, prop, &src),
317 len);
318 break;
319 }
320 zfs_fallthrough;
321 default:
322 (void) strlcpy(buf, "-", len);
323 break;
324 }
325
326 if (srctype != NULL)
327 *srctype = src;
328 return (0);
329 }
330
331 /*
332 * ZPOOL_PROP_DEDUPCACHED can only be fetched by name, using
333 * the ZPOOL_GET_PROPS_NAMES mechanism.
334 */
335 if (prop == ZPOOL_PROP_DEDUPCACHED) {
336 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
337 (void) zpool_props_refresh(zhp);
338 }
339
340 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
341 prop != ZPOOL_PROP_NAME)
342 return (-1);
343
344 switch (zpool_prop_get_type(prop)) {
345 case PROP_TYPE_STRING:
346 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
347 len);
348 break;
349
350 case PROP_TYPE_NUMBER:
351 intval = zpool_get_prop_int(zhp, prop, &src);
352
353 switch (prop) {
354 case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
355 /*
356 * If dedup quota is 0, we translate this into 'none'
357 * (unless literal is set), and if it is UINT64_MAX
358 * we translate that as 'auto' (limit to the size of
359 * the dedicated dedup vdev). Otherwise, fall through
360 * to the regular number formatting.
361 */
362 if (intval == 0) {
363 (void) strlcpy(buf, literal ? "0" : "none",
364 len);
365 break;
366 } else if (intval == UINT64_MAX) {
367 (void) strlcpy(buf, "auto", len);
368 break;
369 }
370 zfs_fallthrough;
371
372 case ZPOOL_PROP_SIZE:
373 case ZPOOL_PROP_ALLOCATED:
374 case ZPOOL_PROP_FREE:
375 case ZPOOL_PROP_FREEING:
376 case ZPOOL_PROP_LEAKED:
377 case ZPOOL_PROP_ASHIFT:
378 case ZPOOL_PROP_MAXBLOCKSIZE:
379 case ZPOOL_PROP_MAXDNODESIZE:
380 case ZPOOL_PROP_BCLONESAVED:
381 case ZPOOL_PROP_BCLONEUSED:
382 case ZPOOL_PROP_DEDUP_TABLE_SIZE:
383 case ZPOOL_PROP_DEDUPCACHED:
384 if (literal)
385 (void) snprintf(buf, len, "%llu",
386 (u_longlong_t)intval);
387 else
388 (void) zfs_nicenum(intval, buf, len);
389 break;
390
391 case ZPOOL_PROP_EXPANDSZ:
392 case ZPOOL_PROP_CHECKPOINT:
393 if (intval == 0) {
394 (void) strlcpy(buf, "-", len);
395 } else if (literal) {
396 (void) snprintf(buf, len, "%llu",
397 (u_longlong_t)intval);
398 } else {
399 (void) zfs_nicebytes(intval, buf, len);
400 }
401 break;
402
403 case ZPOOL_PROP_CAPACITY:
404 if (literal) {
405 (void) snprintf(buf, len, "%llu",
406 (u_longlong_t)intval);
407 } else {
408 (void) snprintf(buf, len, "%llu%%",
409 (u_longlong_t)intval);
410 }
411 break;
412
413 case ZPOOL_PROP_FRAGMENTATION:
414 if (intval == UINT64_MAX) {
415 (void) strlcpy(buf, "-", len);
416 } else if (literal) {
417 (void) snprintf(buf, len, "%llu",
418 (u_longlong_t)intval);
419 } else {
420 (void) snprintf(buf, len, "%llu%%",
421 (u_longlong_t)intval);
422 }
423 break;
424
425 case ZPOOL_PROP_BCLONERATIO:
426 case ZPOOL_PROP_DEDUPRATIO:
427 if (literal)
428 (void) snprintf(buf, len, "%llu.%02llu",
429 (u_longlong_t)(intval / 100),
430 (u_longlong_t)(intval % 100));
431 else
432 (void) snprintf(buf, len, "%llu.%02llux",
433 (u_longlong_t)(intval / 100),
434 (u_longlong_t)(intval % 100));
435 break;
436
437 case ZPOOL_PROP_HEALTH:
438 (void) strlcpy(buf, zpool_get_state_str(zhp), len);
439 break;
440 case ZPOOL_PROP_VERSION:
441 if (intval >= SPA_VERSION_FEATURES) {
442 (void) snprintf(buf, len, "-");
443 break;
444 }
445 zfs_fallthrough;
446 default:
447 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
448 }
449 break;
450
451 case PROP_TYPE_INDEX:
452 intval = zpool_get_prop_int(zhp, prop, &src);
453 if (zpool_prop_index_to_string(prop, intval, &strval)
454 != 0)
455 return (-1);
456 (void) strlcpy(buf, strval, len);
457 break;
458
459 default:
460 abort();
461 }
462
463 if (srctype)
464 *srctype = src;
465
466 return (0);
467 }
468
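/*
 * Illustrative sketch (not part of the original source): fetching the
 * pool health as a formatted string, assuming an open handle 'zhp':
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */
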
469 /*
470 * Get a zpool property value for 'propname' and return the value in
471 * a pre-allocated buffer.
472 */
473 int
474 zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
475 size_t len, zprop_source_t *srctype)
476 {
477 nvlist_t *nv;
478 uint64_t ival;
479 const char *value;
480 zprop_source_t source = ZPROP_SRC_LOCAL;
481
482 if (zhp->zpool_props == NULL)
483 zpool_get_all_props(zhp);
484
485 if (nvlist_lookup_nvlist(zhp->zpool_props, propname, &nv) == 0) {
486 if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
487 source = ival;
488 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
489 } else {
490 source = ZPROP_SRC_DEFAULT;
491 value = "-";
492 }
493
494 if (srctype)
495 *srctype = source;
496
497 (void) strlcpy(buf, value, len);
498
499 return (0);
500 }
501
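/*
 * Illustrative sketch (not part of the original source): reading a
 * hypothetical user property "org.example:owner" from an open handle:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *	if (zpool_get_userprop(zhp, "org.example:owner", buf,
 *	    sizeof (buf), &src) == 0)
 *		(void) printf("owner: %s\n", buf);
 */
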
502 /*
503 * Check that the bootfs name is a dataset within the pool it is
504 * being set on. Assumes bootfs is a valid dataset name.
505 */
506 static boolean_t
507 bootfs_name_valid(const char *pool, const char *bootfs)
508 {
509 int len = strlen(pool);
510 if (bootfs[0] == '\0')
511 return (B_TRUE);
512
513 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
514 return (B_FALSE);
515
516 if (strncmp(pool, bootfs, len) == 0 &&
517 (bootfs[len] == '/' || bootfs[len] == '\0'))
518 return (B_TRUE);
519
520 return (B_FALSE);
521 }
522
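/*
 * Example (illustrative only): for pool "tank", both "tank" and
 * "tank/ROOT/default" are accepted above, while "dozer/ROOT" and
 * "tankers" are rejected because they do not name a dataset inside
 * "tank".
 */
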
523 /*
524 * Given an nvlist of zpool properties to be set, validate that they are
525 * correct, and parse any numeric properties (index, boolean, etc) if they are
526 * specified as strings.
527 */
528 static nvlist_t *
529 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
530 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
531 {
532 nvpair_t *elem;
533 nvlist_t *retprops;
534 zpool_prop_t prop;
535 const char *strval;
536 uint64_t intval;
537 const char *check;
538 struct stat64 statbuf;
539 zpool_handle_t *zhp;
540 char *parent, *slash;
541 char report[1024];
542
543 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
544 (void) no_memory(hdl);
545 return (NULL);
546 }
547
548 elem = NULL;
549 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
550 const char *propname = nvpair_name(elem);
551
552 if (flags.vdevprop && zpool_prop_vdev(propname)) {
553 vdev_prop_t vprop = vdev_name_to_prop(propname);
554
555 if (vdev_prop_readonly(vprop)) {
556 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
557 "is readonly"), propname);
558 (void) zfs_error(hdl, EZFS_PROPREADONLY,
559 errbuf);
560 goto error;
561 }
562
563 if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
564 retprops, &strval, &intval, errbuf) != 0)
565 goto error;
566
567 continue;
568 } else if (flags.vdevprop && vdev_prop_user(propname)) {
569 if (nvlist_add_nvpair(retprops, elem) != 0) {
570 (void) no_memory(hdl);
571 goto error;
572 }
573 continue;
574 } else if (flags.vdevprop) {
575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 "invalid property: '%s'"), propname);
577 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
578 goto error;
579 }
580
581 prop = zpool_name_to_prop(propname);
582 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
583 int err;
584 char *fname = strchr(propname, '@') + 1;
585
586 err = zfeature_lookup_name(fname, NULL);
587 if (err != 0) {
588 ASSERT3U(err, ==, ENOENT);
589 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 "feature '%s' unsupported by kernel"),
591 fname);
592 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
593 goto error;
594 }
595
596 if (nvpair_type(elem) != DATA_TYPE_STRING) {
597 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
598 "'%s' must be a string"), propname);
599 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
600 goto error;
601 }
602
603 (void) nvpair_value_string(elem, &strval);
604 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
605 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
606 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
607 "property '%s' can only be set to "
608 "'enabled' or 'disabled'"), propname);
609 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
610 goto error;
611 }
612
613 if (!flags.create &&
614 strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
615 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
616 "property '%s' can only be set to "
617 "'disabled' at creation time"), propname);
618 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
619 goto error;
620 }
621
622 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
623 (void) no_memory(hdl);
624 goto error;
625 }
626 continue;
627 } else if (prop == ZPOOL_PROP_INVAL &&
628 zfs_prop_user(propname)) {
629 /*
630 * This is a user property: make sure it's a
631 * string, and that its name is less than ZAP_MAXNAMELEN.
632 */
633 if (nvpair_type(elem) != DATA_TYPE_STRING) {
634 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
635 "'%s' must be a string"), propname);
636 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
637 goto error;
638 }
639
640 if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
641 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
642 "property name '%s' is too long"),
643 propname);
644 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
645 goto error;
646 }
647
648 (void) nvpair_value_string(elem, &strval);
649
650 if (strlen(strval) >= ZFS_MAXPROPLEN) {
651 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
652 "property value '%s' is too long"),
653 strval);
654 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
655 goto error;
656 }
657
658 if (nvlist_add_string(retprops, propname,
659 strval) != 0) {
660 (void) no_memory(hdl);
661 goto error;
662 }
663
664 continue;
665 }
666
667 /*
668 * Make sure this property is valid and applies to this type.
669 */
670 if (prop == ZPOOL_PROP_INVAL) {
671 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
672 "invalid property '%s'"), propname);
673 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
674 goto error;
675 }
676
677 if (zpool_prop_readonly(prop)) {
678 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
679 "is readonly"), propname);
680 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
681 goto error;
682 }
683
684 if (!flags.create && zpool_prop_setonce(prop)) {
685 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
686 "property '%s' can only be set at "
687 "creation time"), propname);
688 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
689 goto error;
690 }
691
692 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
693 &strval, &intval, errbuf) != 0)
694 goto error;
695
696 /*
697 * Perform additional checking for specific properties.
698 */
699 switch (prop) {
700 case ZPOOL_PROP_VERSION:
701 if (intval < version ||
702 !SPA_VERSION_IS_SUPPORTED(intval)) {
703 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
704 "property '%s' number %llu is invalid."),
705 propname, (unsigned long long)intval);
706 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
707 goto error;
708 }
709 break;
710
711 case ZPOOL_PROP_ASHIFT:
712 if (intval != 0 &&
713 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
714 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
715 "property '%s' number %llu is invalid, "
716 "only values between %" PRId32 " and %"
717 PRId32 " are allowed."),
718 propname, (unsigned long long)intval,
719 ASHIFT_MIN, ASHIFT_MAX);
720 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
721 goto error;
722 }
723 break;
724
725 case ZPOOL_PROP_BOOTFS:
726 if (flags.create || flags.import) {
727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
728 "property '%s' cannot be set at creation "
729 "or import time"), propname);
730 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
731 goto error;
732 }
733
734 if (version < SPA_VERSION_BOOTFS) {
735 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
736 "pool must be upgraded to support "
737 "'%s' property"), propname);
738 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
739 goto error;
740 }
741
742 /*
743 * The bootfs property value has to be a dataset name, and
744 * the dataset has to be in the same pool it is set on.
745 */
746 if (!bootfs_name_valid(poolname, strval)) {
747 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
748 "is an invalid name"), strval);
749 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
750 goto error;
751 }
752
753 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
754 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
755 "could not open pool '%s'"), poolname);
756 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
757 goto error;
758 }
759 zpool_close(zhp);
760 break;
761
762 case ZPOOL_PROP_ALTROOT:
763 if (!flags.create && !flags.import) {
764 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
765 "property '%s' can only be set during pool "
766 "creation or import"), propname);
767 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
768 goto error;
769 }
770
771 if (strval[0] != '/') {
772 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
773 "bad alternate root '%s'"), strval);
774 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
775 goto error;
776 }
777 break;
778
779 case ZPOOL_PROP_CACHEFILE:
780 if (strval[0] == '\0')
781 break;
782
783 if (strcmp(strval, "none") == 0)
784 break;
785
786 if (strval[0] != '/') {
787 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
788 "property '%s' must be empty, an "
789 "absolute path, or 'none'"), propname);
790 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
791 goto error;
792 }
793
794 parent = strdup(strval);
795 if (parent == NULL) {
796 (void) zfs_error(hdl, EZFS_NOMEM, errbuf);
797 goto error;
798 }
799 slash = strrchr(parent, '/');
800
801 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
802 strcmp(slash, "/..") == 0) {
803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
804 "'%s' is not a valid file"), parent);
805 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
806 free(parent);
807 goto error;
808 }
809
810 *slash = '\0';
811
812 if (parent[0] != '\0' &&
813 (stat64(parent, &statbuf) != 0 ||
814 !S_ISDIR(statbuf.st_mode))) {
815 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
816 "'%s' is not a valid directory"),
817 parent);
818 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
819 free(parent);
820 goto error;
821 }
822 free(parent);
823
824 break;
825
826 case ZPOOL_PROP_COMPATIBILITY:
827 switch (zpool_load_compat(strval, NULL, report, 1024)) {
828 case ZPOOL_COMPATIBILITY_OK:
829 case ZPOOL_COMPATIBILITY_WARNTOKEN:
830 break;
831 case ZPOOL_COMPATIBILITY_BADFILE:
832 case ZPOOL_COMPATIBILITY_BADTOKEN:
833 case ZPOOL_COMPATIBILITY_NOFILES:
834 zfs_error_aux(hdl, "%s", report);
835 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
836 goto error;
837 }
838 break;
839
840 case ZPOOL_PROP_COMMENT:
841 for (check = strval; *check != '\0'; check++) {
842 if (!isprint(*check)) {
843 zfs_error_aux(hdl,
844 dgettext(TEXT_DOMAIN,
845 "comment may only have printable "
846 "characters"));
847 (void) zfs_error(hdl, EZFS_BADPROP,
848 errbuf);
849 goto error;
850 }
851 }
852 if (strlen(strval) > ZPROP_MAX_COMMENT) {
853 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
854 "comment must not exceed %d characters"),
855 ZPROP_MAX_COMMENT);
856 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
857 goto error;
858 }
859 break;
860 case ZPOOL_PROP_READONLY:
861 if (!flags.import) {
862 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
863 "property '%s' can only be set at "
864 "import time"), propname);
865 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
866 goto error;
867 }
868 break;
869 case ZPOOL_PROP_MULTIHOST:
870 if (get_system_hostid() == 0) {
871 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
872 "requires a non-zero system hostid"));
873 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
874 goto error;
875 }
876 break;
877 case ZPOOL_PROP_DEDUPDITTO:
878 printf("Note: property '%s' no longer has "
879 "any effect\n", propname);
880 break;
881
882 default:
883 break;
884 }
885 }
886
887 return (retprops);
888 error:
889 nvlist_free(retprops);
890 return (NULL);
891 }
892
893 /*
894 * Set zpool property : propname=propval.
895 */
896 int
897 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
898 {
899 zfs_cmd_t zc = {"\0"};
900 int ret;
901 char errbuf[ERRBUFLEN];
902 nvlist_t *nvl = NULL;
903 nvlist_t *realprops;
904 uint64_t version;
905 prop_flags_t flags = { 0 };
906
907 (void) snprintf(errbuf, sizeof (errbuf),
908 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
909 zhp->zpool_name);
910
911 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
912 return (no_memory(zhp->zpool_hdl));
913
914 if (nvlist_add_string(nvl, propname, propval) != 0) {
915 nvlist_free(nvl);
916 return (no_memory(zhp->zpool_hdl));
917 }
918
919 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
920 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
921 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
922 nvlist_free(nvl);
923 return (-1);
924 }
925
926 nvlist_free(nvl);
927 nvl = realprops;
928
929 /*
930 * Execute the corresponding ioctl() to set this property.
931 */
932 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
933
934 zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);
935
936 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
937
938 zcmd_free_nvlists(&zc);
939 nvlist_free(nvl);
940
941 if (ret)
942 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
943 else
944 (void) zpool_props_refresh(zhp);
945
946 return (ret);
947 }
948
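/*
 * Illustrative sketch (not part of the original source): setting the
 * pool comment on an open handle 'zhp':
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "failed to set comment\n");
 */
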
949 int
950 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
951 zfs_type_t type, boolean_t literal)
952 {
953 libzfs_handle_t *hdl = zhp->zpool_hdl;
954 zprop_list_t *entry;
955 char buf[ZFS_MAXPROPLEN];
956 nvlist_t *features = NULL;
957 nvpair_t *nvp;
958 zprop_list_t **last;
959 boolean_t firstexpand = (NULL == *plp);
960 int i;
961
962 if (zprop_expand_list(hdl, plp, type) != 0)
963 return (-1);
964
965 if (type == ZFS_TYPE_VDEV)
966 return (0);
967
968 last = plp;
969 while (*last != NULL)
970 last = &(*last)->pl_next;
971
972 if ((*plp)->pl_all)
973 features = zpool_get_features(zhp);
974
975 if ((*plp)->pl_all && firstexpand) {
976 /* Handle userprops in the all properties case */
977 if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
978 return (-1);
979
980 nvp = NULL;
981 while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
982 NULL) {
983 const char *propname = nvpair_name(nvp);
984
985 if (!zfs_prop_user(propname))
986 continue;
987
988 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
989 entry->pl_prop = ZPROP_USERPROP;
990 entry->pl_user_prop = zfs_strdup(hdl, propname);
991 entry->pl_width = strlen(entry->pl_user_prop);
992 entry->pl_all = B_TRUE;
993
994 *last = entry;
995 last = &entry->pl_next;
996 }
997
998 for (i = 0; i < SPA_FEATURES; i++) {
999 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
1000 entry->pl_prop = ZPROP_USERPROP;
1001 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
1002 spa_feature_table[i].fi_uname);
1003 entry->pl_width = strlen(entry->pl_user_prop);
1004 entry->pl_all = B_TRUE;
1005
1006 *last = entry;
1007 last = &entry->pl_next;
1008 }
1009 }
1010
1011 /* add any unsupported features */
1012 for (nvp = nvlist_next_nvpair(features, NULL);
1013 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
1014 char *propname;
1015 boolean_t found;
1016
1017 if (zfeature_is_supported(nvpair_name(nvp)))
1018 continue;
1019
1020 propname = zfs_asprintf(hdl, "unsupported@%s",
1021 nvpair_name(nvp));
1022
1023 /*
1024 * Before adding the property to the list make sure that no
1025 * other pool already added the same property.
1026 */
1027 found = B_FALSE;
1028 entry = *plp;
1029 while (entry != NULL) {
1030 if (entry->pl_user_prop != NULL &&
1031 strcmp(propname, entry->pl_user_prop) == 0) {
1032 found = B_TRUE;
1033 break;
1034 }
1035 entry = entry->pl_next;
1036 }
1037 if (found) {
1038 free(propname);
1039 continue;
1040 }
1041
1042 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
1043 entry->pl_prop = ZPROP_USERPROP;
1044 entry->pl_user_prop = propname;
1045 entry->pl_width = strlen(entry->pl_user_prop);
1046 entry->pl_all = B_TRUE;
1047
1048 *last = entry;
1049 last = &entry->pl_next;
1050 }
1051
1052 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
1053 if (entry->pl_fixed && !literal)
1054 continue;
1055
1056 if (entry->pl_prop != ZPROP_USERPROP &&
1057 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
1058 NULL, literal) == 0) {
1059 if (strlen(buf) > entry->pl_width)
1060 entry->pl_width = strlen(buf);
1061 } else if (entry->pl_prop == ZPROP_INVAL &&
1062 zfs_prop_user(entry->pl_user_prop) &&
1063 zpool_get_userprop(zhp, entry->pl_user_prop, buf,
1064 sizeof (buf), NULL) == 0) {
1065 if (strlen(buf) > entry->pl_width)
1066 entry->pl_width = strlen(buf);
1067 }
1068 }
1069
1070 return (0);
1071 }
1072
1073 int
1074 vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
1075 zprop_list_t **plp)
1076 {
1077 zprop_list_t *entry;
1078 char buf[ZFS_MAXPROPLEN];
1079 const char *strval = NULL;
1080 int err = 0;
1081 nvpair_t *elem = NULL;
1082 nvlist_t *vprops = NULL;
1083 nvlist_t *propval = NULL;
1084 const char *propname;
1085 vdev_prop_t prop;
1086 zprop_list_t **last;
1087
1088 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
1089 if (entry->pl_fixed)
1090 continue;
1091
1092 if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
1093 entry->pl_user_prop, buf, sizeof (buf), NULL,
1094 B_FALSE) == 0) {
1095 if (strlen(buf) > entry->pl_width)
1096 entry->pl_width = strlen(buf);
1097 }
1098 if (entry->pl_prop == VDEV_PROP_NAME &&
1099 strlen(vdevname) > entry->pl_width)
1100 entry->pl_width = strlen(vdevname);
1101 }
1102
1103 /* Handle the all properties case */
1104 last = plp;
1105 if (*last != NULL && (*last)->pl_all == B_TRUE) {
1106 while (*last != NULL)
1107 last = &(*last)->pl_next;
1108
1109 err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
1110 if (err != 0)
1111 return (err);
1112
1113 while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
1114 propname = nvpair_name(elem);
1115
1116 /* Skip properties that are not user defined */
1117 if ((prop = vdev_name_to_prop(propname)) !=
1118 VDEV_PROP_USERPROP)
1119 continue;
1120
1121 if (nvpair_value_nvlist(elem, &propval) != 0)
1122 continue;
1123
1124 strval = fnvlist_lookup_string(propval, ZPROP_VALUE);
1125
1126 entry = zfs_alloc(zhp->zpool_hdl,
1127 sizeof (zprop_list_t));
1128 entry->pl_prop = prop;
1129 entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
1130 propname);
1131 entry->pl_width = strlen(strval);
1132 entry->pl_all = B_TRUE;
1133 *last = entry;
1134 last = &entry->pl_next;
1135 }
1136 }
1137
1138 return (0);
1139 }
1140
1141 /*
1142 * Get the state for the given feature on the given ZFS pool.
1143 */
1144 int
1145 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
1146 size_t len)
1147 {
1148 uint64_t refcount;
1149 boolean_t found = B_FALSE;
1150 nvlist_t *features = zpool_get_features(zhp);
1151 boolean_t supported;
1152 const char *feature = strchr(propname, '@') + 1;
1153
1154 supported = zpool_prop_feature(propname);
1155 ASSERT(supported || zpool_prop_unsupported(propname));
1156
1157 /*
1158 * Convert from feature name to feature guid. This conversion is
1159 * unnecessary for unsupported@... properties because they already
1160 * use guids.
1161 */
1162 if (supported) {
1163 int ret;
1164 spa_feature_t fid;
1165
1166 ret = zfeature_lookup_name(feature, &fid);
1167 if (ret != 0) {
1168 (void) strlcpy(buf, "-", len);
1169 return (ENOTSUP);
1170 }
1171 feature = spa_feature_table[fid].fi_guid;
1172 }
1173
1174 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
1175 found = B_TRUE;
1176
1177 if (supported) {
1178 if (!found) {
1179 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
1180 } else {
1181 if (refcount == 0)
1182 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
1183 else
1184 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
1185 }
1186 } else {
1187 if (found) {
1188 if (refcount == 0) {
1189 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
1190 } else {
1191 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
1192 }
1193 } else {
1194 (void) strlcpy(buf, "-", len);
1195 return (ENOTSUP);
1196 }
1197 }
1198
1199 return (0);
1200 }
1201
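/*
 * Illustrative sketch (not part of the original source): querying a
 * feature state by its property name:
 *
 *	char state[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 *
 * On success 'state' holds "disabled", "enabled", or "active".
 */
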
1202 /*
1203 * Validate the given pool name, optionally setting an extended error
1204 * message on 'hdl'.
1205 */
1206 boolean_t
1207 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
1208 {
1209 namecheck_err_t why;
1210 char what;
1211 int ret;
1212
1213 ret = pool_namecheck(pool, &why, &what);
1214
1215 /*
1216 * The rules for reserved pool names were extended at a later point.
1217 * But we need to support users with existing pools that may now be
1218 * invalid. So we only check for this expanded set of names during a
1219 * create (or import), and only in userland.
1220 */
1221 if (ret == 0 && !isopen &&
1222 (strncmp(pool, "mirror", 6) == 0 ||
1223 strncmp(pool, "raidz", 5) == 0 ||
1224 strncmp(pool, "draid", 5) == 0 ||
1225 strncmp(pool, "spare", 5) == 0 ||
1226 strcmp(pool, "log") == 0)) {
1227 if (hdl != NULL)
1228 zfs_error_aux(hdl,
1229 dgettext(TEXT_DOMAIN, "name is reserved"));
1230 return (B_FALSE);
1231 }
1232
1233
1234 if (ret != 0) {
1235 if (hdl != NULL) {
1236 switch (why) {
1237 case NAME_ERR_TOOLONG:
1238 zfs_error_aux(hdl,
1239 dgettext(TEXT_DOMAIN, "name is too long"));
1240 break;
1241
1242 case NAME_ERR_INVALCHAR:
1243 zfs_error_aux(hdl,
1244 dgettext(TEXT_DOMAIN, "invalid character "
1245 "'%c' in pool name"), what);
1246 break;
1247
1248 case NAME_ERR_NOLETTER:
1249 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1250 "name must begin with a letter"));
1251 break;
1252
1253 case NAME_ERR_RESERVED:
1254 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1255 "name is reserved"));
1256 break;
1257
1258 case NAME_ERR_DISKLIKE:
1259 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1260 "pool name is reserved"));
1261 break;
1262
1263 case NAME_ERR_LEADING_SLASH:
1264 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1265 "leading slash in name"));
1266 break;
1267
1268 case NAME_ERR_EMPTY_COMPONENT:
1269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1270 "empty component in name"));
1271 break;
1272
1273 case NAME_ERR_TRAILING_SLASH:
1274 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1275 "trailing slash in name"));
1276 break;
1277
1278 case NAME_ERR_MULTIPLE_DELIMITERS:
1279 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1280 "multiple '@' and/or '#' delimiters in "
1281 "name"));
1282 break;
1283
1284 case NAME_ERR_NO_AT:
1285 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1286 "permission set is missing '@'"));
1287 break;
1288
1289 default:
1290 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1291 "(%d) not defined"), why);
1292 break;
1293 }
1294 }
1295 return (B_FALSE);
1296 }
1297
1298 return (B_TRUE);
1299 }
1300
1301 /*
1302 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1303 * state.
1304 */
1305 zpool_handle_t *
1306 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1307 {
1308 zpool_handle_t *zhp;
1309 boolean_t missing;
1310
1311 /*
1312 * Make sure the pool name is valid.
1313 */
1314 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1315 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1316 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1317 pool);
1318 return (NULL);
1319 }
1320
1321 zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
1322
1323 zhp->zpool_hdl = hdl;
1324 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1325
1326 if (zpool_refresh_stats(zhp, &missing) != 0) {
1327 zpool_close(zhp);
1328 return (NULL);
1329 }
1330
1331 if (missing) {
1332 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1333 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1334 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1335 zpool_close(zhp);
1336 return (NULL);
1337 }
1338
1339 return (zhp);
1340 }
1341
1342 /*
1343 * Like the above, but silent on error. Used when iterating over pools (because
1344 * the configuration cache may be out of date).
1345 */
1346 int
1347 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1348 {
1349 zpool_handle_t *zhp;
1350 boolean_t missing;
1351
1352 zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
1353
1354 zhp->zpool_hdl = hdl;
1355 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1356
1357 if (zpool_refresh_stats(zhp, &missing) != 0) {
1358 zpool_close(zhp);
1359 return (-1);
1360 }
1361
1362 if (missing) {
1363 zpool_close(zhp);
1364 *ret = NULL;
1365 return (0);
1366 }
1367
1368 *ret = zhp;
1369 return (0);
1370 }
1371
1372 /*
1373 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1374 * state.
1375 */
1376 zpool_handle_t *
1377 zpool_open(libzfs_handle_t *hdl, const char *pool)
1378 {
1379 zpool_handle_t *zhp;
1380
1381 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1382 return (NULL);
1383
1384 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1385 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1386 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1387 zpool_close(zhp);
1388 return (NULL);
1389 }
1390
1391 return (zhp);
1392 }
1393
1394 /*
1395 * Close the handle. Simply frees the memory associated with the handle.
1396 */
1397 void
1398 zpool_close(zpool_handle_t *zhp)
1399 {
1400 nvlist_free(zhp->zpool_config);
1401 nvlist_free(zhp->zpool_old_config);
1402 nvlist_free(zhp->zpool_props);
1403 free(zhp);
1404 }
1405
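/*
 * Illustrative sketch (not part of the original source): the usual
 * open/use/close pattern for a pool handle:
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		(void) printf("%s\n", zpool_get_name(zhp));
 *		zpool_close(zhp);
 *	}
 */
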
1406 /*
1407 * Return the name of the pool.
1408 */
1409 const char *
1410 zpool_get_name(zpool_handle_t *zhp)
1411 {
1412 return (zhp->zpool_name);
1413 }
1414
1415
1416 /*
1417 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1418 */
1419 int
1420 zpool_get_state(zpool_handle_t *zhp)
1421 {
1422 return (zhp->zpool_state);
1423 }
1424
1425 /*
1426 * Check if vdev list contains a dRAID vdev
1427 */
1428 static boolean_t
1429 zpool_has_draid_vdev(nvlist_t *nvroot)
1430 {
1431 nvlist_t **child;
1432 uint_t children;
1433
1434 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1435 &child, &children) == 0) {
1436 for (uint_t c = 0; c < children; c++) {
1437 const char *type;
1438
1439 if (nvlist_lookup_string(child[c],
1440 ZPOOL_CONFIG_TYPE, &type) == 0 &&
1441 strcmp(type, VDEV_TYPE_DRAID) == 0) {
1442 return (B_TRUE);
1443 }
1444 }
1445 }
1446 return (B_FALSE);
1447 }
1448
1449 /*
1450 * Output a dRAID top-level vdev name into the provided buffer.
1451 */
1452 static char *
1453 zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
1454 uint64_t spares, uint64_t children)
1455 {
1456 snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
1457 VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
1458 (u_longlong_t)children, (u_longlong_t)spares);
1459
1460 return (name);
1461 }
1462
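/*
 * Example (illustrative only): parity=2, data=8, spares=2, children=24
 * yields the name "draid2:8d:24c:2s".
 */
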
1463 /*
1464 * Return B_TRUE if the provided name is a dRAID spare name.
1465 */
1466 boolean_t
1467 zpool_is_draid_spare(const char *name)
1468 {
1469 uint64_t spare_id, parity, vdev_id;
1470
1471 if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
1472 (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
1473 (u_longlong_t *)&spare_id) == 3) {
1474 return (B_TRUE);
1475 }
1476
1477 return (B_FALSE);
1478 }
1479
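/*
 * Example (illustrative only): "draid2-1-3" is a dRAID spare name
 * (spare 3 of top-level vdev 1, parity 2), while a leaf name such as
 * "sda" is not.
 */
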
1480 /*
1481 * Create the named pool, using the provided vdev list. It is assumed
1482 * that the consumer has already validated the contents of the nvlist, so we
1483 * don't have to worry about error semantics.
1484 */
1485 int
1486 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1487 nvlist_t *props, nvlist_t *fsprops)
1488 {
1489 zfs_cmd_t zc = {"\0"};
1490 nvlist_t *zc_fsprops = NULL;
1491 nvlist_t *zc_props = NULL;
1492 nvlist_t *hidden_args = NULL;
1493 uint8_t *wkeydata = NULL;
1494 uint_t wkeylen = 0;
1495 char errbuf[ERRBUFLEN];
1496 int ret = -1;
1497
1498 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1499 "cannot create '%s'"), pool);
1500
1501 if (!zpool_name_valid(hdl, B_FALSE, pool))
1502 return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
1503
1504 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
1505
1506 if (props) {
1507 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1508
1509 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1510 SPA_VERSION_1, flags, errbuf)) == NULL) {
1511 goto create_failed;
1512 }
1513 }
1514
1515 if (fsprops) {
1516 uint64_t zoned;
1517 const char *zonestr;
1518
1519 zoned = ((nvlist_lookup_string(fsprops,
1520 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1521 strcmp(zonestr, "on") == 0);
1522
1523 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1524 fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
1525 goto create_failed;
1526 }
1527
1528 if (!zc_props &&
1529 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1530 goto create_failed;
1531 }
1532 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
1533 &wkeydata, &wkeylen) != 0) {
1534 zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
1535 goto create_failed;
1536 }
1537 if (nvlist_add_nvlist(zc_props,
1538 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1539 goto create_failed;
1540 }
1541 if (wkeydata != NULL) {
1542 if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
1543 goto create_failed;
1544
1545 if (nvlist_add_uint8_array(hidden_args, "wkeydata",
1546 wkeydata, wkeylen) != 0)
1547 goto create_failed;
1548
1549 if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
1550 hidden_args) != 0)
1551 goto create_failed;
1552 }
1553 }
1554
1555 if (zc_props)
1556 zcmd_write_src_nvlist(hdl, &zc, zc_props);
1557
1558 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1559
1560 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1561
1562 zcmd_free_nvlists(&zc);
1563 nvlist_free(zc_props);
1564 nvlist_free(zc_fsprops);
1565 nvlist_free(hidden_args);
1566 if (wkeydata != NULL)
1567 free(wkeydata);
1568
1569 switch (errno) {
1570 case EBUSY:
1571 /*
1572 * This can happen if the user has specified the same
1573 * device multiple times. We can't reliably detect this
1574 * until we try to add it and see we already have a
1575 * label. This can also happen if the device is
1576 * part of an active md or lvm device.
1577 */
1578 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1579 "one or more vdevs refer to the same device, or "
1580 "one of\nthe devices is part of an active md or "
1581 "lvm device"));
1582 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1583
1584 case ERANGE:
1585 /*
1586 * This happens if the record size is smaller or larger
1587 * than the allowed size range, or not a power of 2.
1588 *
1589 * NOTE: although zfs_valid_proplist is called earlier,
1590 * this case may have slipped through since the
1591 * pool does not exist yet and it is therefore
1592 * impossible to read properties, e.g. max blocksize,
1593 * from the pool.
1594 */
1595 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1596 "record size invalid"));
1597 return (zfs_error(hdl, EZFS_BADPROP, errbuf));
1598
1599 case EOVERFLOW:
1600 /*
1601 * This occurs when one of the devices is below
1602 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1603 * device was the problem device since there's no
1604 * reliable way to determine device size from userland.
1605 */
1606 {
1607 char buf[64];
1608
1609 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1610 sizeof (buf));
1611
1612 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1613 "one or more devices is less than the "
1614 "minimum size (%s)"), buf);
1615 }
1616 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1617
1618 case ENOSPC:
1619 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1620 "one or more devices is out of space"));
1621 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1622
1623 case EINVAL:
1624 if (zpool_has_draid_vdev(nvroot) &&
1625 zfeature_lookup_name("draid", NULL) != 0) {
1626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1627 "dRAID vdevs are unsupported by the "
1628 "kernel"));
1629 return (zfs_error(hdl, EZFS_BADDEV, errbuf));
1630 } else {
1631 return (zpool_standard_error(hdl, errno,
1632 errbuf));
1633 }
1634
1635 default:
1636 return (zpool_standard_error(hdl, errno, errbuf));
1637 }
1638 }
1639
1640 create_failed:
1641 zcmd_free_nvlists(&zc);
1642 nvlist_free(zc_props);
1643 nvlist_free(zc_fsprops);
1644 nvlist_free(hidden_args);
1645 if (wkeydata != NULL)
1646 free(wkeydata);
1647 return (ret);
1648 }
1649
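/*
 * Illustrative sketch (not part of the original source): creating a pool
 * from a prebuilt vdev tree nvlist 'nvroot' (normally assembled by the
 * zpool command) with no extra properties:
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */
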
1650 /*
1651 * Destroy the given pool. It is up to the caller to ensure that there are no
1652 * datasets left in the pool.
1653 */
1654 int
1655 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1656 {
1657 zfs_cmd_t zc = {"\0"};
1658 zfs_handle_t *zfp = NULL;
1659 libzfs_handle_t *hdl = zhp->zpool_hdl;
1660 char errbuf[ERRBUFLEN];
1661
1662 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1663 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1664 return (-1);
1665
1666 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1667 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1668
1669 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1670 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1671 "cannot destroy '%s'"), zhp->zpool_name);
1672
1673 if (errno == EROFS) {
1674 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1675 "one or more devices is read only"));
1676 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1677 } else {
1678 (void) zpool_standard_error(hdl, errno, errbuf);
1679 }
1680
1681 if (zfp)
1682 zfs_close(zfp);
1683 return (-1);
1684 }
1685
1686 if (zfp) {
1687 remove_mountpoint(zfp);
1688 zfs_close(zfp);
1689 }
1690
1691 return (0);
1692 }
1693
1694 /*
1695 * Create a checkpoint in the given pool.
1696 */
1697 int
1698 zpool_checkpoint(zpool_handle_t *zhp)
1699 {
1700 libzfs_handle_t *hdl = zhp->zpool_hdl;
1701 char errbuf[ERRBUFLEN];
1702 int error;
1703
1704 error = lzc_pool_checkpoint(zhp->zpool_name);
1705 if (error != 0) {
1706 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1707 "cannot checkpoint '%s'"), zhp->zpool_name);
1708 (void) zpool_standard_error(hdl, error, errbuf);
1709 return (-1);
1710 }
1711
1712 return (0);
1713 }
1714
1715 /*
1716 * Discard the checkpoint from the given pool.
1717 */
1718 int
1719 zpool_discard_checkpoint(zpool_handle_t *zhp)
1720 {
1721 libzfs_handle_t *hdl = zhp->zpool_hdl;
1722 char errbuf[ERRBUFLEN];
1723 int error;
1724
1725 error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1726 if (error != 0) {
1727 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1728 "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1729 (void) zpool_standard_error(hdl, error, errbuf);
1730 return (-1);
1731 }
1732
1733 return (0);
1734 }
1735
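/*
 * Illustrative sketch (not part of the original source): taking and
 * later discarding a pool checkpoint:
 *
 *	if (zpool_checkpoint(zhp) == 0) {
 *		... (risky operation) ...
 *		(void) zpool_discard_checkpoint(zhp);
 *	}
 */
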
1736 /*
1737 * Prefetch the given data type for the given pool.
1738 */
1739 int
1740 zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type)
1741 {
1742 libzfs_handle_t *hdl = zhp->zpool_hdl;
1743 char msg[1024];
1744 int error;
1745
1746 error = lzc_pool_prefetch(zhp->zpool_name, type);
1747 if (error != 0) {
1748 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1749 "cannot prefetch %s in '%s'"),
1750 type == ZPOOL_PREFETCH_DDT ? "ddt" : "", zhp->zpool_name);
1751 (void) zpool_standard_error(hdl, error, msg);
1752 return (-1);
1753 }
1754
1755 return (0);
1756 }
1757
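/*
 * Illustrative sketch (not part of the original source): prefetching the
 * dedup table for an open pool handle:
 *
 *	if (zpool_prefetch(zhp, ZPOOL_PREFETCH_DDT) != 0)
 *		(void) fprintf(stderr, "prefetch failed\n");
 */
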
1758 /*
1759 * Add the given vdevs to the pool. The caller must have already performed the
1760 * necessary verification to ensure that the vdev specification is well-formed.
1761 */
1762 int
1763 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)
1764 {
1765 zfs_cmd_t zc = {"\0"};
1766 int ret;
1767 libzfs_handle_t *hdl = zhp->zpool_hdl;
1768 char errbuf[ERRBUFLEN];
1769 nvlist_t **spares, **l2cache;
1770 uint_t nspares, nl2cache;
1771
1772 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1773 "cannot add to '%s'"), zhp->zpool_name);
1774
1775 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1776 SPA_VERSION_SPARES &&
1777 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1778 &spares, &nspares) == 0) {
1779 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1780 "upgraded to add hot spares"));
1781 return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
1782 }
1783
1784 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1785 SPA_VERSION_L2CACHE &&
1786 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1787 &l2cache, &nl2cache) == 0) {
1788 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1789 "upgraded to add cache devices"));
1790 return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
1791 }
1792
1793 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
1794 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1795 zc.zc_flags = check_ashift;
1796
1797 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1798 switch (errno) {
1799 case EBUSY:
1800 /*
1801 * This can happen if the user has specified the same
1802 * device multiple times. We can't reliably detect this
1803 * until we try to add it and see we already have a
1804 * label.
1805 */
1806 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1807 "one or more vdevs refer to the same device"));
1808 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1809 break;
1810
1811 case EINVAL:
1812
1813 if (zpool_has_draid_vdev(nvroot) &&
1814 zfeature_lookup_name("draid", NULL) != 0) {
1815 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1816 "dRAID vdevs are unsupported by the "
1817 "kernel"));
1818 } else {
1819 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1820 "invalid config; a pool with removing/"
1821 "removed vdevs does not support adding "
1822 "raidz or dRAID vdevs"));
1823 }
1824
1825 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1826 break;
1827
1828 case EOVERFLOW:
1829 /*
1830 * This occurs when one of the devices is below
1831 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1832 * device was the problem device since there's no
1833 * reliable way to determine device size from userland.
1834 */
1835 {
1836 char buf[64];
1837
1838 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1839 sizeof (buf));
1840
1841 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1842 "device is less than the minimum "
1843 "size (%s)"), buf);
1844 }
1845 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
1846 break;
1847
1848 case ENOTSUP:
1849 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1850 "pool must be upgraded to add these vdevs"));
1851 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
1852 break;
1853
1854 default:
1855 (void) zpool_standard_error(hdl, errno, errbuf);
1856 }
1857
1858 ret = -1;
1859 } else {
1860 ret = 0;
1861 }
1862
1863 zcmd_free_nvlists(&zc);
1864
1865 return (ret);
1866 }
1867
1868 /*
1869 * Exports the pool from the system. The caller must ensure that there are no
1870 * mounted datasets in the pool.
1871 */
1872 static int
1873 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1874 const char *log_str)
1875 {
1876 zfs_cmd_t zc = {"\0"};
1877
1878 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1879 zc.zc_cookie = force;
1880 zc.zc_guid = hardforce;
1881 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1882
1883 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1884 switch (errno) {
1885 case EXDEV:
1886 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1887 "use '-f' to override the following errors:\n"
1888 "'%s' has an active shared spare which could be"
1889 " used by other pools once '%s' is exported."),
1890 zhp->zpool_name, zhp->zpool_name);
1891 return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1892 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1893 zhp->zpool_name));
1894 default:
1895 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1896 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1897 zhp->zpool_name));
1898 }
1899 }
1900
1901 return (0);
1902 }
1903
1904 int
1905 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1906 {
1907 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1908 }
1909
1910 int
1911 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1912 {
1913 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1914 }
1915
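/*
 * Illustrative sketch (not part of the original source): exporting a
 * pool and recording the invoking command line in the pool history:
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		(void) fprintf(stderr, "export failed\n");
 */
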
1916 static void
1917 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1918 nvlist_t *config)
1919 {
1920 nvlist_t *nv = NULL;
1921 uint64_t rewindto;
1922 int64_t loss = -1;
1923 struct tm t;
1924 char timestr[128];
1925
1926 if (!hdl->libzfs_printerr || config == NULL)
1927 return;
1928
1929 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1930 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1931 return;
1932 }
1933
1934 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1935 return;
1936 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1937
1938 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1939 ctime_r((time_t *)&rewindto, timestr) != NULL) {
1940 timestr[24] = 0;
1941 if (dryrun) {
1942 (void) printf(dgettext(TEXT_DOMAIN,
1943 "Would be able to return %s "
1944 "to its state as of %s.\n"),
1945 name, timestr);
1946 } else {
1947 (void) printf(dgettext(TEXT_DOMAIN,
1948 "Pool %s returned to its state as of %s.\n"),
1949 name, timestr);
1950 }
1951 if (loss > 120) {
1952 (void) printf(dgettext(TEXT_DOMAIN,
1953 "%s approximately %lld "),
1954 dryrun ? "Would discard" : "Discarded",
1955 ((longlong_t)loss + 30) / 60);
1956 (void) printf(dgettext(TEXT_DOMAIN,
1957 "minutes of transactions.\n"));
1958 } else if (loss > 0) {
1959 (void) printf(dgettext(TEXT_DOMAIN,
1960 "%s approximately %lld "),
1961 dryrun ? "Would discard" : "Discarded",
1962 (longlong_t)loss);
1963 (void) printf(dgettext(TEXT_DOMAIN,
1964 "seconds of transactions.\n"));
1965 }
1966 }
1967 }
1968
1969 void
1970 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1971 nvlist_t *config, char *buf, size_t size)
1972 {
1973 nvlist_t *nv = NULL;
1974 int64_t loss = -1;
1975 uint64_t edata = UINT64_MAX;
1976 uint64_t rewindto;
1977 struct tm t;
1978 char timestr[128], temp[1024];
1979
1980 if (!hdl->libzfs_printerr)
1981 return;
1982
1983 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1984 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1985 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1986 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1987 goto no_info;
1988
1989 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1990 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1991 &edata);
1992
1993 (void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
1994 "Recovery is possible, but will result in some data loss.\n"));
1995
1996 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1997 ctime_r((time_t *)&rewindto, timestr) != NULL) {
1998 timestr[24] = 0;
1999 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2000 "\tReturning the pool to its state as of %s\n"
2001 "\tshould correct the problem. "), timestr);
2002 (void) strlcat(buf, temp, size);
2003 } else {
2004 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2005 "\tReverting the pool to an earlier state "
2006 "should correct the problem.\n\t"), size);
2007 }
2008
2009 if (loss > 120) {
2010 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2011 "Approximately %lld minutes of data\n"
2012 "\tmust be discarded, irreversibly. "),
2013 ((longlong_t)loss + 30) / 60);
2014 (void) strlcat(buf, temp, size);
2015 } else if (loss > 0) {
2016 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2017 "Approximately %lld seconds of data\n"
2018 "\tmust be discarded, irreversibly. "),
2019 (longlong_t)loss);
2020 (void) strlcat(buf, temp, size);
2021 }
2022 if (edata != 0 && edata != UINT64_MAX) {
2023 if (edata == 1) {
2024 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2025 "After rewind, at least\n"
2026 "\tone persistent user-data error will remain. "),
2027 size);
2028 } else {
2029 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2030 "After rewind, several\n"
2031 "\tpersistent user-data errors will remain. "),
2032 size);
2033 }
2034 }
2035 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2036 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
2037 reason >= 0 ? "clear" : "import", name);
2038 (void) strlcat(buf, temp, size);
2039
2040 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2041 "A scrub of the pool\n"
2042 "\tis strongly recommended after recovery.\n"), size);
2043 return;
2044
2045 no_info:
2046 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2047 "Destroy and re-create the pool from\n\ta backup source.\n"), size);
2048 }
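
/*
 * Usage sketch (illustrative): surface the recovery advice assembled by
 * zpool_explain_recover() after a failed import. The negated-errno
 * convention and buffer size mirror the call site in zpool_import_props()
 * below; the helper itself is hypothetical.
 */
static void
example_print_recovery_advice(libzfs_handle_t *hdl, const char *name,
    int err, nvlist_t *config)
{
	char buf[2048];

	memset(buf, 0, sizeof (buf));
	zpool_explain_recover(hdl, name, -err, config, buf, sizeof (buf));
	(void) printf("\t%s", buf);
}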
2049
2050 /*
2051 * zpool_import() is a contracted interface; it should be kept the same
2052 * if possible.
2053 *
2054 * Applications should use zpool_import_props() to import a pool with
2055 * new property values to be set.
2056 */
2057 int
2058 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2059 char *altroot)
2060 {
2061 nvlist_t *props = NULL;
2062 int ret;
2063
2064 if (altroot != NULL) {
2065 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
2066 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2067 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2068 newname));
2069 }
2070
2071 if (nvlist_add_string(props,
2072 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
2073 nvlist_add_string(props,
2074 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
2075 nvlist_free(props);
2076 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2077 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2078 newname));
2079 }
2080 }
2081
2082 ret = zpool_import_props(hdl, config, newname, props,
2083 ZFS_IMPORT_NORMAL);
2084 nvlist_free(props);
2085 return (ret);
2086 }
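
/*
 * Usage sketch (illustrative): import a discovered pool under an
 * alternate root. 'config' is assumed to have come from the import
 * search code; the helper and mount point are hypothetical.
 */
static int
example_import_with_altroot(libzfs_handle_t *hdl, nvlist_t *config)
{
	char altroot[] = "/mnt/recovery";

	/* A NULL 'newname' keeps the pool's original name. */
	return (zpool_import(hdl, config, NULL, altroot));
}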
2087
2088 static void
2089 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
2090 int indent)
2091 {
2092 nvlist_t **child;
2093 uint_t c, children;
2094 char *vname;
2095 uint64_t is_log = 0;
2096
2097 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
2098 &is_log);
2099
2100 if (name != NULL)
2101 (void) printf("\t%*s%s%s\n", indent, "", name,
2102 is_log ? " [log]" : "");
2103
2104 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2105 &child, &children) != 0)
2106 return;
2107
2108 for (c = 0; c < children; c++) {
2109 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
2110 print_vdev_tree(hdl, vname, child[c], indent + 2);
2111 free(vname);
2112 }
2113 }
2114
2115 void
2116 zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
2117 {
2118 nvlist_t *nvinfo, *unsup_feat;
2119 char temp[512];
2120
2121 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
2122 unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
2123
2124 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
2125 nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
2126 const char *desc = fnvpair_value_string(nvp);
2127 if (strlen(desc) > 0) {
2128 (void) snprintf(temp, 512, "\t%s (%s)\n",
2129 nvpair_name(nvp), desc);
2130 (void) strlcat(buf, temp, size);
2131 } else {
2132 (void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
2133 (void) strlcat(buf, temp, size);
2134 }
2135 }
2136 }
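
/*
 * Usage sketch (illustrative): format the unsupported-feature list for
 * display. The buffer must start out empty because
 * zpool_collect_unsup_feat() only appends; the helper is hypothetical.
 */
static void
example_show_unsup_feat(nvlist_t *config)
{
	char buf[2048];

	memset(buf, 0, sizeof (buf));
	zpool_collect_unsup_feat(config, buf, sizeof (buf));
	(void) printf("%s", buf);
}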
2137
2138 /*
2139 * Import the given pool using the known configuration and a list of
2140 * properties to be set. The configuration should have come from
2141 * zpool_find_import(). The 'newname' parameter controls whether the pool
2142 * is imported under a different name.
2143 */
2144 int
2145 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2146 nvlist_t *props, int flags)
2147 {
2148 zfs_cmd_t zc = {"\0"};
2149 zpool_load_policy_t policy;
2150 nvlist_t *nv = NULL;
2151 nvlist_t *nvinfo = NULL;
2152 nvlist_t *missing = NULL;
2153 const char *thename;
2154 const char *origname;
2155 int ret;
2156 int error = 0;
2157 char buf[2048];
2158 char errbuf[ERRBUFLEN];
2159
2160 origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
2161
2162 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2163 "cannot import pool '%s'"), origname);
2164
2165 if (newname != NULL) {
2166 if (!zpool_name_valid(hdl, B_FALSE, newname))
2167 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
2168 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2169 newname));
2170 thename = newname;
2171 } else {
2172 thename = origname;
2173 }
2174
2175 if (props != NULL) {
2176 uint64_t version;
2177 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2178
2179 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
2180
2181 if ((props = zpool_valid_proplist(hdl, origname,
2182 props, version, flags, errbuf)) == NULL)
2183 return (-1);
2184 zcmd_write_src_nvlist(hdl, &zc, props);
2185 nvlist_free(props);
2186 }
2187
2188 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
2189
2190 zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
2191
2192 zcmd_write_conf_nvlist(hdl, &zc, config);
2193 zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
2194
2195 zc.zc_cookie = flags;
2196 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
2197 errno == ENOMEM)
2198 zcmd_expand_dst_nvlist(hdl, &zc);
2199 if (ret != 0)
2200 error = errno;
2201
2202 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
2203
2204 zcmd_free_nvlists(&zc);
2205
2206 zpool_get_load_policy(config, &policy);
2207
2208 if (error) {
2209 char desc[1024];
2210 char aux[256];
2211
2212 /*
2213 * The dry-run failed, but we print out what success
2214 * would look like if we found a best txg.
2215 */
2216 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
2217 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2218 B_TRUE, nv);
2219 nvlist_free(nv);
2220 return (-1);
2221 }
2222
2223 if (newname == NULL)
2224 (void) snprintf(desc, sizeof (desc),
2225 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2226 thename);
2227 else
2228 (void) snprintf(desc, sizeof (desc),
2229 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
2230 origname, thename);
2231
2232 switch (error) {
2233 case ENOTSUP:
2234 if (nv != NULL && nvlist_lookup_nvlist(nv,
2235 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2236 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
2237 (void) printf(dgettext(TEXT_DOMAIN, "This "
2238 "pool uses the following feature(s) not "
2239 "supported by this system:\n"));
2240 memset(buf, 0, 2048);
2241 zpool_collect_unsup_feat(nv, buf, 2048);
2242 (void) printf("%s", buf);
2243 if (nvlist_exists(nvinfo,
2244 ZPOOL_CONFIG_CAN_RDONLY)) {
2245 (void) printf(dgettext(TEXT_DOMAIN,
2246 "All unsupported features are only "
2247 "required for writing to the pool."
2248 "\nThe pool can be imported using "
2249 "'-o readonly=on'.\n"));
2250 }
2251 }
2252 /*
2253 * Unsupported version.
2254 */
2255 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
2256 break;
2257
2258 case EREMOTEIO:
2259 if (nv != NULL && nvlist_lookup_nvlist(nv,
2260 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
2261 const char *hostname = "<unknown>";
2262 uint64_t hostid = 0;
2263 mmp_state_t mmp_state;
2264
2265 mmp_state = fnvlist_lookup_uint64(nvinfo,
2266 ZPOOL_CONFIG_MMP_STATE);
2267
2268 if (nvlist_exists(nvinfo,
2269 ZPOOL_CONFIG_MMP_HOSTNAME))
2270 hostname = fnvlist_lookup_string(nvinfo,
2271 ZPOOL_CONFIG_MMP_HOSTNAME);
2272
2273 if (nvlist_exists(nvinfo,
2274 ZPOOL_CONFIG_MMP_HOSTID))
2275 hostid = fnvlist_lookup_uint64(nvinfo,
2276 ZPOOL_CONFIG_MMP_HOSTID);
2277
2278 if (mmp_state == MMP_STATE_ACTIVE) {
2279 (void) snprintf(aux, sizeof (aux),
2280 dgettext(TEXT_DOMAIN, "pool is imp"
2281 "orted on host '%s' (hostid=%lx).\n"
2282 "Export the pool on the other "
2283 "system, then run 'zpool import'."),
2284 hostname, (unsigned long) hostid);
2285 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
2286 (void) snprintf(aux, sizeof (aux),
2287 dgettext(TEXT_DOMAIN, "pool has "
2288 "the multihost property on and "
2289 "the\nsystem's hostid is not set. "
2290 "Set a unique system hostid with "
2291 "the zgenhostid(8) command.\n"));
2292 }
2293
2294 (void) zfs_error_aux(hdl, "%s", aux);
2295 }
2296 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
2297 break;
2298
2299 case EINVAL:
2300 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2301 break;
2302
2303 case EROFS:
2304 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2305 "one or more devices is read only"));
2306 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2307 break;
2308
2309 case ENXIO:
2310 if (nv && nvlist_lookup_nvlist(nv,
2311 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2312 nvlist_lookup_nvlist(nvinfo,
2313 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2314 (void) printf(dgettext(TEXT_DOMAIN,
2315 "The devices below are missing or "
2316 "corrupted, use '-m' to import the pool "
2317 "anyway:\n"));
2318 print_vdev_tree(hdl, NULL, missing, 2);
2319 (void) printf("\n");
2320 }
2321 (void) zpool_standard_error(hdl, error, desc);
2322 break;
2323
2324 case EEXIST:
2325 (void) zpool_standard_error(hdl, error, desc);
2326 break;
2327
2328 case EBUSY:
2329 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2330 "one or more devices are already in use\n"));
2331 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2332 break;
2333 case ENAMETOOLONG:
2334 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2335 "new name of at least one dataset is longer than "
2336 "the maximum allowable length"));
2337 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2338 break;
2339 default:
2340 (void) zpool_standard_error(hdl, error, desc);
2341 memset(buf, 0, 2048);
2342 zpool_explain_recover(hdl,
2343 newname ? origname : thename, -error, nv,
2344 buf, 2048);
2345 (void) printf("\t%s", buf);
2346 break;
2347 }
2348
2349 nvlist_free(nv);
2350 ret = -1;
2351 } else {
2352 zpool_handle_t *zhp;
2353
2354 /*
2355 * This should never fail, but play it safe anyway.
2356 */
2357 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2358 ret = -1;
2359 else if (zhp != NULL)
2360 zpool_close(zhp);
2361 if (policy.zlp_rewind &
2362 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2363 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2364 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2365 }
2366 nvlist_free(nv);
2367 }
2368
2369 return (ret);
2370 }
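
/*
 * Usage sketch (illustrative): import a pool under a new name with the
 * default import behavior. The helper and the replacement name are
 * hypothetical; ZFS_IMPORT_NORMAL is the standard flag value.
 */
static int
example_import_renamed(libzfs_handle_t *hdl, nvlist_t *config)
{
	return (zpool_import_props(hdl, config, "tank-restored", NULL,
	    ZFS_IMPORT_NORMAL));
}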
2371
2372 /*
2373 * Translate vdev names to guids. If a vdev_path is determined to be
2374 * unsuitable then a vd_errlist is allocated and the vdev path and errno
2375 * are added to it.
2376 */
2377 static int
2378 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2379 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2380 {
2381 nvlist_t *errlist = NULL;
2382 int error = 0;
2383
2384 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2385 elem = nvlist_next_nvpair(vds, elem)) {
2386 boolean_t spare, cache;
2387
2388 const char *vd_path = nvpair_name(elem);
2389 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2390 NULL);
2391
2392 if ((tgt == NULL) || cache || spare) {
2393 if (errlist == NULL) {
2394 errlist = fnvlist_alloc();
2395 error = EINVAL;
2396 }
2397
2398 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2399 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2400 fnvlist_add_int64(errlist, vd_path, err);
2401 continue;
2402 }
2403
2404 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2405 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2406
2407 char msg[MAXNAMELEN];
2408 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2409 fnvlist_add_string(guids_to_paths, msg, vd_path);
2410 }
2411
2412 if (error != 0) {
2413 verify(errlist != NULL);
2414 if (vd_errlist != NULL)
2415 *vd_errlist = errlist;
2416 else
2417 fnvlist_free(errlist);
2418 }
2419
2420 return (error);
2421 }
2422
2423 static int
2424 xlate_init_err(int err)
2425 {
2426 switch (err) {
2427 case ENODEV:
2428 return (EZFS_NODEVICE);
2429 case EINVAL:
2430 case EROFS:
2431 return (EZFS_BADDEV);
2432 case EBUSY:
2433 return (EZFS_INITIALIZING);
2434 case ESRCH:
2435 return (EZFS_NO_INITIALIZE);
2436 }
2437 return (err);
2438 }
2439
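/*
 * Per-pool callback-style helper: reopen 'zhp' by name, collect all of
 * its leaf vdevs, and start (or wait on) initialization as directed by
 * the initialize_cbdata_t passed in 'data'.
 */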
2440 int
2441 zpool_initialize_one(zpool_handle_t *zhp, void *data)
2442 {
2443 int error;
2444 libzfs_handle_t *hdl = zpool_get_handle(zhp);
2445 const char *pool_name = zpool_get_name(zhp);
2446 if (zpool_open_silent(hdl, pool_name, &zhp) != 0)
2447 return (-1);
2448 initialize_cbdata_t *cb = data;
2449 nvlist_t *vdevs = fnvlist_alloc();
2450
2451 nvlist_t *config = zpool_get_config(zhp, NULL);
2452 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
2453 ZPOOL_CONFIG_VDEV_TREE);
2454 zpool_collect_leaves(zhp, nvroot, vdevs);
2455 if (cb->wait)
2456 error = zpool_initialize_wait(zhp, cb->cmd_type, vdevs);
2457 else
2458 error = zpool_initialize(zhp, cb->cmd_type, vdevs);
2459 fnvlist_free(vdevs);
zpool_close(zhp);	/* close the handle opened by zpool_open_silent() */
2460
2461 return (error);
2462 }
2463
2464 /*
2465 * Begin, suspend, cancel, or uninit (clear) the initialization (initializing
2466 * of all free blocks) for the given vdevs in the given pool.
2467 */
2468 static int
2469 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2470 nvlist_t *vds, boolean_t wait)
2471 {
2472 int err;
2473
2474 nvlist_t *vdev_guids = fnvlist_alloc();
2475 nvlist_t *guids_to_paths = fnvlist_alloc();
2476 nvlist_t *vd_errlist = NULL;
2477 nvlist_t *errlist;
2478 nvpair_t *elem;
2479
2480 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2481 guids_to_paths, &vd_errlist);
2482
2483 if (err != 0) {
2484 verify(vd_errlist != NULL);
2485 goto list_errors;
2486 }
2487
2488 err = lzc_initialize(zhp->zpool_name, cmd_type,
2489 vdev_guids, &errlist);
2490
2491 if (err != 0) {
2492 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2493 ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
2494 goto list_errors;
2495 }
2496
2497 if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
2498 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2499 "uninitialize is not supported by kernel"));
2500 }
2501
2502 (void) zpool_standard_error(zhp->zpool_hdl, err,
2503 dgettext(TEXT_DOMAIN, "operation failed"));
2504 goto out;
2505 }
2506
2507 if (wait) {
2508 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2509 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2510
2511 uint64_t guid = fnvpair_value_uint64(elem);
2512
2513 err = lzc_wait_tag(zhp->zpool_name,
2514 ZPOOL_WAIT_INITIALIZE, guid, NULL);
2515 if (err != 0) {
2516 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2517 err, dgettext(TEXT_DOMAIN, "error "
2518 "waiting for '%s' to initialize"),
2519 nvpair_name(elem));
2520
2521 goto out;
2522 }
2523 }
2524 }
2525 goto out;
2526
2527 list_errors:
2528 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2529 elem = nvlist_next_nvpair(vd_errlist, elem)) {
2530 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2531 const char *path;
2532
2533 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2534 &path) != 0)
2535 path = nvpair_name(elem);
2536
2537 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2538 "cannot initialize '%s'", path);
2539 }
2540
2541 out:
2542 fnvlist_free(vdev_guids);
2543 fnvlist_free(guids_to_paths);
2544
2545 if (vd_errlist != NULL)
2546 fnvlist_free(vd_errlist);
2547
2548 return (err == 0 ? 0 : -1);
2549 }
2550
2551 int
2552 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2553 nvlist_t *vds)
2554 {
2555 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2556 }
2557
2558 int
2559 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2560 nvlist_t *vds)
2561 {
2562 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2563 }
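
/*
 * Usage sketch (illustrative): start initializing a single leaf vdev
 * and wait for completion. The device name is hypothetical; targets
 * are added to the nvlist as boolean flags, matching what
 * zpool_collect_leaves() produces.
 */
static int
example_initialize_and_wait(zpool_handle_t *zhp)
{
	nvlist_t *vds = fnvlist_alloc();

	fnvlist_add_boolean(vds, "/dev/sda1");	/* hypothetical leaf */
	int err = zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);
	fnvlist_free(vds);
	return (err);
}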
2564
2565 static int
2566 xlate_trim_err(int err)
2567 {
2568 switch (err) {
2569 case ENODEV:
2570 return (EZFS_NODEVICE);
2571 case EINVAL:
2572 case EROFS:
2573 return (EZFS_BADDEV);
2574 case EBUSY:
2575 return (EZFS_TRIMMING);
2576 case ESRCH:
2577 return (EZFS_NO_TRIM);
2578 case EOPNOTSUPP:
2579 return (EZFS_TRIM_NOTSUP);
2580 }
2581 return (err);
2582 }
2583
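/*
 * Recursively walk the vdev tree rooted at 'nvroot' and add the name of
 * every leaf vdev to 'res', skipping indirect and hole vdevs.
 */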
2584 void
2585 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
2586 {
2587 libzfs_handle_t *hdl = zhp->zpool_hdl;
2588 uint_t children = 0;
2589 nvlist_t **child;
2590 uint_t i;
2591
2592 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2593 &child, &children);
2594
2595 if (children == 0) {
2596 char *path = zpool_vdev_name(hdl, zhp, nvroot,
2597 VDEV_NAME_PATH);
2598
2599 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
2600 strcmp(path, VDEV_TYPE_HOLE) != 0)
2601 fnvlist_add_boolean(res, path);
2602
2603 free(path);
2604 return;
2605 }
2606
2607 for (i = 0; i < children; i++) {
2608 zpool_collect_leaves(zhp, child[i], res);
2609 }
2610 }
2611
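/*
 * Per-pool callback-style helper: reopen 'zhp' by name, collect all of
 * its leaf vdevs, and issue a TRIM as directed by the trim_cbdata_t
 * passed in 'data'.
 */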
2612 int
2613 zpool_trim_one(zpool_handle_t *zhp, void *data)
2614 {
2615 int error;
2616 libzfs_handle_t *hdl = zpool_get_handle(zhp);
2617 const char *pool_name = zpool_get_name(zhp);
2618 if (zpool_open_silent(hdl, pool_name, &zhp) != 0)
2619 return (-1);
2620
2621 trim_cbdata_t *cb = data;
2622 nvlist_t *vdevs = fnvlist_alloc();
2623
2624 /* no individual leaf vdevs specified, so add them all */
2625 nvlist_t *config = zpool_get_config(zhp, NULL);
2626 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
2627 ZPOOL_CONFIG_VDEV_TREE);
2628
2629 zpool_collect_leaves(zhp, nvroot, vdevs);
2630 error = zpool_trim(zhp, cb->cmd_type, vdevs, &cb->trim_flags);
2631 fnvlist_free(vdevs);
zpool_close(zhp);	/* close the handle opened by zpool_open_silent() */
2632
2633 return (error);
2634 }
2635
2636 static int
2637 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2638 {
2639 int err;
2640 nvpair_t *elem;
2641
2642 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2643 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2644
2645 uint64_t guid = fnvpair_value_uint64(elem);
2646
2647 err = lzc_wait_tag(zhp->zpool_name,
2648 ZPOOL_WAIT_TRIM, guid, NULL);
2649 if (err != 0) {
2650 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2651 err, dgettext(TEXT_DOMAIN, "error "
2652 "waiting to trim '%s'"), nvpair_name(elem));
2653
2654 return (err);
2655 }
2656 }
2657 return (0);
2658 }
2659
2660 /*
2661 * Check errlist and report any errors, omitting ones which should be
2662 * suppressed. Returns B_TRUE if any errors were reported.
2663 */
2664 static boolean_t
2665 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
2666 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
2667 {
2668 nvpair_t *elem;
2669 boolean_t reported_errs = B_FALSE;
2670 int num_vds = 0;
2671 int num_suppressed_errs = 0;
2672
2673 for (elem = nvlist_next_nvpair(vds, NULL);
2674 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
2675 num_vds++;
2676 }
2677
2678 for (elem = nvlist_next_nvpair(errlist, NULL);
2679 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
2680 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2681 const char *path;
2682
2683 /*
2684 * If only the pool was specified, and it was not a secure
2685 * trim, then suppress warnings for individual vdevs which
2686 * do not support trimming.
2687 */
2688 if (vd_error == EZFS_TRIM_NOTSUP &&
2689 trim_flags->fullpool &&
2690 !trim_flags->secure) {
2691 num_suppressed_errs++;
2692 continue;
2693 }
2694
2695 reported_errs = B_TRUE;
2696 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2697 &path) != 0)
2698 path = nvpair_name(elem);
2699
2700 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2701 "cannot trim '%s'", path);
2702 }
2703
2704 if (num_suppressed_errs == num_vds) {
2705 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2706 "no devices in pool support trim operations"));
2707 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
2708 dgettext(TEXT_DOMAIN, "cannot trim")));
2709 reported_errs = B_TRUE;
2710 }
2711
2712 return (reported_errs);
2713 }
2714
2715 /*
2716 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2717 * the given vdevs in the given pool.
2718 */
2719 int
2720 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
2721 trimflags_t *trim_flags)
2722 {
2723 int err;
2724 int retval = 0;
2725
2726 nvlist_t *vdev_guids = fnvlist_alloc();
2727 nvlist_t *guids_to_paths = fnvlist_alloc();
2728 nvlist_t *errlist = NULL;
2729
2730 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2731 guids_to_paths, &errlist);
2732 if (err != 0) {
2733 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
2734 retval = -1;
2735 goto out;
2736 }
2737
2738 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
2739 trim_flags->secure, vdev_guids, &errlist);
2740 if (err != 0) {
2741 nvlist_t *vd_errlist;
2742 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2743 ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
2744 if (check_trim_errs(zhp, trim_flags, guids_to_paths,
2745 vds, vd_errlist)) {
2746 retval = -1;
2747 goto out;
2748 }
2749 } else {
2750 char errbuf[ERRBUFLEN];
2751
2752 (void) snprintf(errbuf, sizeof (errbuf),
2753 dgettext(TEXT_DOMAIN, "operation failed"));
2754 zpool_standard_error(zhp->zpool_hdl, err, errbuf);
2755 retval = -1;
2756 goto out;
2757 }
2758 }
2759
2761 if (trim_flags->wait)
2762 retval = zpool_trim_wait(zhp, vdev_guids);
2763
2764 out:
2765 if (errlist != NULL)
2766 fnvlist_free(errlist);
2767 fnvlist_free(vdev_guids);
2768 fnvlist_free(guids_to_paths);
2769 return (retval);
2770 }
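
/*
 * Usage sketch (illustrative): TRIM every leaf in the pool and wait for
 * completion. The flag values are assumptions for the example; a rate
 * of 0 means unthrottled, and 'fullpool' lets check_trim_errs() above
 * suppress per-vdev "trim not supported" warnings for a non-secure trim.
 */
static int
example_trim_pool(zpool_handle_t *zhp)
{
	trimflags_t flags = {
		.fullpool = B_TRUE,
		.secure = B_FALSE,
		.wait = B_TRUE,
		.rate = 0
	};
	nvlist_t *vds = fnvlist_alloc();
	nvlist_t *config = zpool_get_config(zhp, NULL);
	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);

	zpool_collect_leaves(zhp, nvroot, vds);
	int err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
	fnvlist_free(vds);
	return (err);
}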
2771
2772 /*
2773 * Scan the pool.
2774 */
2775 int
2776 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
2777 return (zpool_scan_range(zhp, func, cmd, 0, 0));
2778 }
2779
2780 int
2781 zpool_scan_range(zpool_handle_t *zhp, pool_scan_func_t func,
2782 pool_scrub_cmd_t cmd, time_t date_start, time_t date_end)
2783 {
2784 char errbuf[ERRBUFLEN];
2785 int err;
2786 libzfs_handle_t *hdl = zhp->zpool_hdl;
2787
2788 nvlist_t *args = fnvlist_alloc();
2789 fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
2790 fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);
2791 fnvlist_add_uint64(args, "scan_date_start", (uint64_t)date_start);
2792 fnvlist_add_uint64(args, "scan_date_end", (uint64_t)date_end);
2793
2794 err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
2795 fnvlist_free(args);
2796
2797 if (err == 0) {
2798 return (0);
2799 } else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
2800 zfs_cmd_t zc = {"\0"};
2801 (void) strlcpy(zc.zc_name, zhp->zpool_name,
2802 sizeof (zc.zc_name));
2803 zc.zc_cookie = func;
2804 zc.zc_flags = cmd;
2805
2806 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
2807 return (0);
2808 }
2809
2810 /*
2811 * An ECANCELED on a scrub means one of the following:
2812 * 1. We resumed a paused scrub.
2813 * 2. We resumed a paused error scrub.
2814 * 3. An error scrub was not run because there is no error log.
2815 *
2816 * Note that we no longer return ECANCELED in case 1 or 2. However, in
2817 * order to prevent problems where we have a newer userland than
2818 * kernel, we keep this check in place. That prevents erroneous
2819 * failures when an older kernel returns ECANCELED in those cases.
2820 */
2821 if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
2822 func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
2823 return (0);
2824 /*
2825 * The following case is handled here:
2826 * 1. Pausing a scrub/error scrub when none is in progress.
2827 */
2828 if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
2829 POOL_SCRUB_PAUSE) {
2830 return (0);
2831 }
2832
2833 ASSERT3U(func, >=, POOL_SCAN_NONE);
2834 ASSERT3U(func, <, POOL_SCAN_FUNCS);
2835
2836 if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
2837 if (cmd == POOL_SCRUB_PAUSE) {
2838 (void) snprintf(errbuf, sizeof (errbuf),
2839 dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
2840 zhp->zpool_name);
2841 } else {
2842 assert(cmd == POOL_SCRUB_NORMAL);
2843 (void) snprintf(errbuf, sizeof (errbuf),
2844 dgettext(TEXT_DOMAIN, "cannot scrub %s"),
2845 zhp->zpool_name);
2846 }
2847 } else if (func == POOL_SCAN_RESILVER) {
2848 assert(cmd == POOL_SCRUB_NORMAL);
2849 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2850 "cannot restart resilver on %s"), zhp->zpool_name);
2851 } else if (func == POOL_SCAN_NONE) {
2852 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2853 "cannot cancel scrubbing %s"), zhp->zpool_name);
2854 } else {
2855 assert(!"unexpected result");
2856 }
2857
2858 /*
2859 * With EBUSY, six cases are possible:
2860 *
2861 * Current state Requested
2862 * 1. Normal Scrub Running Normal Scrub or Error Scrub
2863 * 2. Normal Scrub Paused Error Scrub
2864 * 3. Normal Scrub Paused Pause Normal Scrub
2865 * 4. Error Scrub Running Normal Scrub or Error Scrub
2866 * 5. Error Scrub Paused Pause Error Scrub
2867 * 6. Resilvering Anything else
2868 */
2869 if (err == EBUSY) {
2870 nvlist_t *nvroot;
2871 pool_scan_stat_t *ps = NULL;
2872 uint_t psc;
2873
2874 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
2875 ZPOOL_CONFIG_VDEV_TREE);
2876 (void) nvlist_lookup_uint64_array(nvroot,
2877 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
2878 if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
2879 ps->pss_state == DSS_SCANNING) {
2880 if (ps->pss_pass_scrub_pause == 0) {
2881 /* handles case 1 */
2882 assert(cmd == POOL_SCRUB_NORMAL);
2883 return (zfs_error(hdl, EZFS_SCRUBBING,
2884 errbuf));
2885 } else {
2886 if (func == POOL_SCAN_ERRORSCRUB) {
2887 /* handles case 2 */
2888 ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
2889 return (zfs_error(hdl,
2890 EZFS_SCRUB_PAUSED_TO_CANCEL,
2891 errbuf));
2892 } else {
2893 /* handles case 3 */
2894 ASSERT3U(func, ==, POOL_SCAN_SCRUB);
2895 ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
2896 return (zfs_error(hdl,
2897 EZFS_SCRUB_PAUSED, errbuf));
2898 }
2899 }
2900 } else if (ps &&
2901 ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
2902 ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
2903 if (ps->pss_pass_error_scrub_pause == 0) {
2904 /* handles case 4 */
2905 ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
2906 return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
2907 errbuf));
2908 } else {
2909 /* handles case 5 */
2910 ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
2911 ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
2912 return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
2913 errbuf));
2914 }
2915 } else {
2916 /* handles case 6 */
2917 return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
2918 }
2919 } else if (err == ENOENT) {
2920 return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
2921 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
2922 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
2923 } else {
2924 return (zpool_standard_error(hdl, err, errbuf));
2925 }
2926 }
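
/*
 * Usage sketch (illustrative): kick off a scrub, then pause it later.
 * Both calls route through zpool_scan_range() with no date restriction;
 * the pause maps to the POOL_SCRUB_PAUSE handling above.
 */
static int
example_scrub_then_pause(zpool_handle_t *zhp)
{
	int err = zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);

	if (err != 0)
		return (err);
	/* ... some time later ... */
	return (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE));
}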
2927
2928 /*
2929 * Find a vdev that matches the search criteria specified. We use the
2930 * nvpair name to determine how we should look for the device.
2931 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2932 * spare, but FALSE if it's an INUSE spare.
2933 *
2934 * If 'return_parent' is set, then return the *parent* of the vdev you're
2935 * searching for rather than the vdev itself.
2936 */
2937 static nvlist_t *
2938 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2939 boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
2940 {
2941 uint_t c, children;
2942 nvlist_t **child;
2943 nvlist_t *ret;
2944 uint64_t is_log;
2945 const char *srchkey;
2946 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2947 const char *tmp = NULL;
2948 boolean_t is_root;
2949
2950 /* Nothing to look for */
2951 if (search == NULL || pair == NULL)
2952 return (NULL);
2953
2954 /* Obtain the key we will use to search */
2955 srchkey = nvpair_name(pair);
2956
2957 nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
2958 if (strcmp(tmp, "root") == 0)
2959 is_root = B_TRUE;
2960 else
2961 is_root = B_FALSE;
2962
2963 switch (nvpair_type(pair)) {
2964 case DATA_TYPE_UINT64:
2965 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2966 uint64_t srchval = fnvpair_value_uint64(pair);
2967 uint64_t theguid = fnvlist_lookup_uint64(nv,
2968 ZPOOL_CONFIG_GUID);
2969 if (theguid == srchval)
2970 return (nv);
2971 }
2972 break;
2973
2974 case DATA_TYPE_STRING: {
2975 const char *srchval, *val;
2976
2977 srchval = fnvpair_value_string(pair);
2978 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2979 break;
2980
2981 /*
2982 * Search for the requested value. Special cases:
2983 *
2984 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
2985 * "-part1", or "p1". The suffix is hidden from the user,
2986 * but included in the string, so this matches around it.
2987 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2988 * is used to check all possible expanded paths.
2989 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2990 *
2991 * Otherwise, all other searches are simple string compares.
2992 */
2993 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2994 uint64_t wholedisk = 0;
2995
2996 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2997 &wholedisk);
2998 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2999 return (nv);
3000
3001 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
3002 char *type, *idx, *end, *p;
3003 uint64_t id, vdev_id;
3004
3005 /*
3006 * Determine our vdev type, keeping in mind
3007 * that the srchval is composed of a type and
3008 * vdev id pair (i.e. mirror-4).
3009 */
3010 if ((type = strdup(srchval)) == NULL)
3011 return (NULL);
3012
3013 if ((p = strrchr(type, '-')) == NULL) {
3014 free(type);
3015 break;
3016 }
3017 idx = p + 1;
3018 *p = '\0';
3019
3020 /*
3021 * draid names are presented like: draid2:4d:6c:0s
3022 * We match them up to the first ':' so we can still
3023 * do the parity check below, but the other params
3024 * are ignored.
3025 */
3026 if ((p = strchr(type, ':')) != NULL) {
3027 if (strncmp(type, VDEV_TYPE_DRAID,
3028 strlen(VDEV_TYPE_DRAID)) == 0)
3029 *p = '\0';
3030 }
3031
3032 /*
3033 * If the types don't match then keep looking.
3034 */
3035 if (strncmp(val, type, strlen(val)) != 0) {
3036 free(type);
3037 break;
3038 }
3039
3040 verify(zpool_vdev_is_interior(type));
3041
3042 id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
3043 errno = 0;
3044 vdev_id = strtoull(idx, &end, 10);
3045
3046 /*
3047 * If we are looking for a raidz and a parity is
3048 * specified, make sure it matches.
3049 */
3050 int rzlen = strlen(VDEV_TYPE_RAIDZ);
3051 assert(rzlen == strlen(VDEV_TYPE_DRAID));
3052 int typlen = strlen(type);
3053 if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
3054 strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
3055 typlen != rzlen) {
3056 uint64_t vdev_parity;
3057 int parity = *(type + rzlen) - '0';
3058
3059 if (parity <= 0 || parity > 3 ||
3060 (typlen - rzlen) != 1) {
3061 /*
3062 * Nonsense parity specified, can
3063 * never match
3064 */
3065 free(type);
3066 return (NULL);
3067 }
3068 vdev_parity = fnvlist_lookup_uint64(nv,
3069 ZPOOL_CONFIG_NPARITY);
3070 if ((int)vdev_parity != parity) {
3071 free(type);
3072 break;
3073 }
3074 }
3075
3076 free(type);
3077 if (errno != 0)
3078 return (NULL);
3079
3080 /*
3081 * Now verify that we have the correct vdev id.
3082 */
3083 if (vdev_id == id)
3084 return (nv);
3085 }
3086
3087 /*
3088 * Common case
3089 */
3090 if (strcmp(srchval, val) == 0)
3091 return (nv);
3092 break;
3093 }
3094
3095 default:
3096 break;
3097 }
3098
3099 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3100 &child, &children) != 0)
3101 return (NULL);
3102
3103 for (c = 0; c < children; c++) {
3104 if ((ret = vdev_to_nvlist_iter(child[c], search,
3105 avail_spare, l2cache, NULL, return_parent)) != NULL) {
3106 /*
3107 * The 'is_log' value is only set for the toplevel
3108 * vdev, not the leaf vdevs. So we always lookup the
3109 * log device from the root of the vdev tree (where
3110 * 'log' is non-NULL).
3111 */
3112 if (log != NULL &&
3113 nvlist_lookup_uint64(child[c],
3114 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
3115 is_log) {
3116 *log = B_TRUE;
3117 }
3118 return (ret && return_parent && !is_root ? nv : ret);
3119 }
3120 }
3121
3122 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3123 &child, &children) == 0) {
3124 for (c = 0; c < children; c++) {
3125 if ((ret = vdev_to_nvlist_iter(child[c], search,
3126 avail_spare, l2cache, NULL, return_parent))
3127 != NULL) {
3128 *avail_spare = B_TRUE;
3129 return (ret && return_parent &&
3130 !is_root ? nv : ret);
3131 }
3132 }
3133 }
3134
3135 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3136 &child, &children) == 0) {
3137 for (c = 0; c < children; c++) {
3138 if ((ret = vdev_to_nvlist_iter(child[c], search,
3139 avail_spare, l2cache, NULL, return_parent))
3140 != NULL) {
3141 *l2cache = B_TRUE;
3142 return (ret && return_parent &&
3143 !is_root ? nv : ret);
3144 }
3145 }
3146 }
3147
3148 return (NULL);
3149 }
3150
3151 /*
3152 * Given a physical path or guid, find the associated vdev.
3153 */
3154 nvlist_t *
3155 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
3156 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3157 {
3158 nvlist_t *search, *nvroot, *ret;
3159 uint64_t guid;
3160 char *end;
3161
3162 search = fnvlist_alloc();
3163
3164 guid = strtoull(ppath, &end, 0);
3165 if (guid != 0 && *end == '\0') {
3166 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3167 } else {
3168 fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
3169 }
3170
3171 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3172 ZPOOL_CONFIG_VDEV_TREE);
3173
3174 *avail_spare = B_FALSE;
3175 *l2cache = B_FALSE;
3176 if (log != NULL)
3177 *log = B_FALSE;
3178 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3179 B_FALSE);
3180 fnvlist_free(search);
3181
3182 return (ret);
3183 }
3184
3185 /*
3186 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
3187 */
3188 static boolean_t
3189 zpool_vdev_is_interior(const char *name)
3190 {
3191 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
3192 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
3193 strncmp(name,
3194 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
3195 strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
3196 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
3197 return (B_TRUE);
3198
3199 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
3200 !zpool_is_draid_spare(name))
3201 return (B_TRUE);
3202
3203 return (B_FALSE);
3204 }
3205
3206 /*
3207 * Lookup the nvlist for a given vdev or vdev's parent (depending on
3208 * if 'return_parent' is set).
3209 */
3210 static nvlist_t *
3211 __zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3212 boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
3213 {
3214 char *end;
3215 nvlist_t *nvroot, *search, *ret;
3216 uint64_t guid;
3217 boolean_t __avail_spare, __l2cache, __log;
3218
3219 search = fnvlist_alloc();
3220
3221 guid = strtoull(path, &end, 0);
3222 if (guid != 0 && *end == '\0') {
3223 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3224 } else if (zpool_vdev_is_interior(path)) {
3225 fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
3226 } else {
3227 fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
3228 }
3229
3230 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3231 ZPOOL_CONFIG_VDEV_TREE);
3232
3233 /*
3234 * User can pass NULL for avail_spare, l2cache, and log, but
3235 * we still need to provide variables to vdev_to_nvlist_iter(), so
3236 * just point them to junk variables here.
3237 */
3238 if (!avail_spare)
3239 avail_spare = &__avail_spare;
3240 if (!l2cache)
3241 l2cache = &__l2cache;
3242 if (!log)
3243 log = &__log;
3244
3245 *avail_spare = B_FALSE;
3246 *l2cache = B_FALSE;
3247 if (log != NULL)
3248 *log = B_FALSE;
3249 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3250 return_parent);
3251 fnvlist_free(search);
3252
3253 return (ret);
3254 }
3255
3256 nvlist_t *
3257 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3258 boolean_t *l2cache, boolean_t *log)
3259 {
3260 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3261 B_FALSE));
3262 }
3263
3264 /* Given a vdev path, return its parent's nvlist */
3265 nvlist_t *
3266 zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,
3267 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3268 {
3269 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3270 B_TRUE));
3271 }
3272
3273 /*
3274 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
3275 *
3276 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
3277 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
3278 * ignore them.
3279 */
3280 static uint64_t
3281 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
3282 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
3283 {
3284 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
3285 nvlist_t *tgt;
3286
3287 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
3288 &log)) == NULL)
3289 return (0);
3290
3291 if (is_spare != NULL)
3292 *is_spare = spare;
3293 if (is_l2cache != NULL)
3294 *is_l2cache = l2cache;
3295 if (is_log != NULL)
3296 *is_log = log;
3297
3298 return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
3299 }
3300
3301 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
3302 uint64_t
3303 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
3304 {
3305 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
3306 }
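
/*
 * Usage sketch (illustrative): resolve a vdev path (or a printable
 * GUID) to its GUID, treating 0 as "not found". The device path is
 * hypothetical.
 */
static void
example_lookup_vdev_guid(zpool_handle_t *zhp)
{
	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda1");

	if (guid == 0)
		(void) printf("vdev not found\n");
	else
		(void) printf("vdev guid: %llu\n", (u_longlong_t)guid);
}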
3307
3308 /*
3309 * Bring the specified vdev online. The 'flags' parameter is a set of the
3310 * ZFS_ONLINE_* flags.
3311 */
3312 int
3313 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
3314 vdev_state_t *newstate)
3315 {
3316 zfs_cmd_t zc = {"\0"};
3317 char errbuf[ERRBUFLEN];
3318 nvlist_t *tgt;
3319 boolean_t avail_spare, l2cache, islog;
3320 libzfs_handle_t *hdl = zhp->zpool_hdl;
3321
3322 if (flags & ZFS_ONLINE_EXPAND) {
3323 (void) snprintf(errbuf, sizeof (errbuf),
3324 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
3325 } else {
3326 (void) snprintf(errbuf, sizeof (errbuf),
3327 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
3328 }
3329
3330 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3331 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3332 &islog)) == NULL)
3333 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3334
3335 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3336
3337 if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
3338 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3339
3340 #ifndef __FreeBSD__
3341 const char *pathname;
3342 if ((flags & ZFS_ONLINE_EXPAND ||
3343 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
3344 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
3345 uint64_t wholedisk = 0;
3346
3347 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
3348 &wholedisk);
3349
3350 /*
3351 * XXX - L2ARC 1.0 devices can't support expansion.
3352 */
3353 if (l2cache) {
3354 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3355 "cannot expand cache devices"));
3356 return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
3357 }
3358
3359 if (wholedisk) {
3360 const char *fullpath = path;
3361 char buf[MAXPATHLEN];
3362 int error;
3363
3364 if (path[0] != '/') {
3365 error = zfs_resolve_shortname(path, buf,
3366 sizeof (buf));
3367 if (error != 0)
3368 return (zfs_error(hdl, EZFS_NODEVICE,
3369 errbuf));
3370
3371 fullpath = buf;
3372 }
3373
3374 error = zpool_relabel_disk(hdl, fullpath, errbuf);
3375 if (error != 0)
3376 return (error);
3377 }
3378 }
3379 #endif
3380
3381 zc.zc_cookie = VDEV_STATE_ONLINE;
3382 zc.zc_obj = flags;
3383
3384 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
3385 if (errno == EINVAL) {
3386 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
3387 "from this pool into a new one. Use '%s' "
3388 "instead"), "zpool detach");
3389 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
3390 }
3391 return (zpool_standard_error(hdl, errno, errbuf));
3392 }
3393
3394 *newstate = zc.zc_cookie;
3395 return (0);
3396 }
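
/*
 * Usage sketch (illustrative): bring a device back online and report
 * the state the kernel actually assigned. The helper is hypothetical;
 * ZFS_ONLINE_EXPAND additionally grows the vdev to use any new space.
 */
static int
example_online_vdev(zpool_handle_t *zhp, const char *path)
{
	vdev_state_t newstate;
	int err = zpool_vdev_online(zhp, path, ZFS_ONLINE_EXPAND, &newstate);

	if (err == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) printf("device brought online, but not healthy\n");
	return (err);
}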
3397
3398 /*
3399 * Take the specified vdev offline
3400 */
3401 int
3402 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
3403 {
3404 zfs_cmd_t zc = {"\0"};
3405 char errbuf[ERRBUFLEN];
3406 nvlist_t *tgt;
3407 boolean_t avail_spare, l2cache;
3408 libzfs_handle_t *hdl = zhp->zpool_hdl;
3409
3410 (void) snprintf(errbuf, sizeof (errbuf),
3411 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
3412
3413 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3414 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3415 NULL)) == NULL)
3416 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3417
3418 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3419
3420 if (avail_spare)
3421 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3422
3423 zc.zc_cookie = VDEV_STATE_OFFLINE;
3424 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3425
3426 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3427 return (0);
3428
3429 switch (errno) {
3430 case EBUSY:
3432 /*
3433 * There are no other replicas of this device.
3434 */
3435 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
3436
3437 case EEXIST:
3438 /*
3439 * The log device has unplayed logs
3440 */
3441 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
3442
3443 default:
3444 return (zpool_standard_error(hdl, errno, errbuf));
3445 }
3446 }
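
/*
 * Usage sketch (illustrative): take a device offline temporarily, so
 * that the OFFLINE state is not persisted across a pool re-import.
 */
static int
example_offline_temporarily(zpool_handle_t *zhp, const char *path)
{
	return (zpool_vdev_offline(zhp, path, B_TRUE));
}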
3447
3448 /*
3449 * Remove the specified vdev asynchronously from the configuration, so
3450 * that it may come ONLINE if reinserted. This is called from zed on a
3451 * udev remove event.
3452 * Note: We also have a similar function zpool_vdev_remove() that
3453 * removes the vdev from the pool.
3454 */
3455 int
3456 zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
3457 {
3458 zfs_cmd_t zc = {"\0"};
3459 char errbuf[ERRBUFLEN];
3460 nvlist_t *tgt;
3461 boolean_t avail_spare, l2cache;
3462 libzfs_handle_t *hdl = zhp->zpool_hdl;
3463
3464 (void) snprintf(errbuf, sizeof (errbuf),
3465 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3466
3467 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3468 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3469 NULL)) == NULL)
3470 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3471
3472 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3473
3474 zc.zc_cookie = VDEV_STATE_REMOVED;
3475
3476 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3477 return (0);
3478
3479 return (zpool_standard_error(hdl, errno, errbuf));
3480 }
3481
3482 /*
3483 * Mark the given vdev faulted.
3484 */
3485 int
3486 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3487 {
3488 zfs_cmd_t zc = {"\0"};
3489 char errbuf[ERRBUFLEN];
3490 libzfs_handle_t *hdl = zhp->zpool_hdl;
3491
3492 (void) snprintf(errbuf, sizeof (errbuf),
3493 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3494
3495 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3496 zc.zc_guid = guid;
3497 zc.zc_cookie = VDEV_STATE_FAULTED;
3498 zc.zc_obj = aux;
3499
3500 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3501 return (0);
3502
3503 switch (errno) {
3504 case EBUSY:
3506 /*
3507 * There are no other replicas of this device.
3508 */
3509 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
3510
3511 default:
3512 return (zpool_standard_error(hdl, errno, errbuf));
3513 }
3515 }
3516
3517 /*
3518 * Generic set vdev state function
3519 */
3520 static int
3521 zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
3522 vdev_state_t state)
3523 {
3524 zfs_cmd_t zc = {"\0"};
3525 char errbuf[ERRBUFLEN];
3526 libzfs_handle_t *hdl = zhp->zpool_hdl;
3527
3528 (void) snprintf(errbuf, sizeof (errbuf),
3529 dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
3530 zpool_state_to_name(state, aux), (u_longlong_t)guid);
3531
3532 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3533 zc.zc_guid = guid;
3534 zc.zc_cookie = state;
3535 zc.zc_obj = aux;
3536
3537 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3538 return (0);
3539
3540 return (zpool_standard_error(hdl, errno, errbuf));
3541 }
3542
3543 /*
3544 * Mark the given vdev degraded.
3545 */
3546 int
3547 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3548 {
3549 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
3550 }
3551
3552 /*
3553 * Mark the given vdev as in a removed state (as if the device does not exist).
3554 *
3555 * This is different than zpool_vdev_remove() which does a removal of a device
3556 * from the pool (but the device does exist).
3557 */
3558 int
3559 zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3560 {
3561 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
3562 }
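
/*
 * Usage sketch (illustrative): administratively fault one vdev and
 * degrade another by GUID. VDEV_AUX_EXTERNAL and VDEV_AUX_ERR_EXCEEDED
 * are real vdev_aux_t values; the GUIDs are assumed to have come from
 * zpool_vdev_path_to_guid() above.
 */
static void
example_fault_and_degrade(zpool_handle_t *zhp, uint64_t bad_guid,
    uint64_t flaky_guid)
{
	(void) zpool_vdev_fault(zhp, bad_guid, VDEV_AUX_EXTERNAL);
	(void) zpool_vdev_degrade(zhp, flaky_guid, VDEV_AUX_ERR_EXCEEDED);
}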
3563
3564 /*
3565 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3566 * a hot spare.
3567 */
3568 static boolean_t
3569 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3570 {
3571 nvlist_t **child;
3572 uint_t c, children;
3573
3574 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3575 &children) == 0) {
3576 const char *type = fnvlist_lookup_string(search,
3577 ZPOOL_CONFIG_TYPE);
3578 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
3579 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
3580 children == 2 && child[which] == tgt)
3581 return (B_TRUE);
3582
3583 for (c = 0; c < children; c++)
3584 if (is_replacing_spare(child[c], tgt, which))
3585 return (B_TRUE);
3586 }
3587
3588 return (B_FALSE);
3589 }
3590
3591 /*
3592 * Attach new_disk (fully described by nvroot) to old_disk.
3593 * If 'replacing' is specified, the new disk will replace the old one.
3594 */
3595 int
3596 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
3597 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
3598 {
3599 zfs_cmd_t zc = {"\0"};
3600 char errbuf[ERRBUFLEN];
3601 int ret;
3602 nvlist_t *tgt;
3603 boolean_t avail_spare, l2cache, islog;
3604 uint64_t val;
3605 char *newname;
3606 const char *type;
3607 nvlist_t **child;
3608 uint_t children;
3609 nvlist_t *config_root;
3610 libzfs_handle_t *hdl = zhp->zpool_hdl;
3611
3612 if (replacing)
3613 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3614 "cannot replace %s with %s"), old_disk, new_disk);
3615 else
3616 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3617 "cannot attach %s to %s"), new_disk, old_disk);
3618
3619 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3620 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3621 &islog)) == NULL)
3622 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3623
3624 if (avail_spare)
3625 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3626
3627 if (l2cache)
3628 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
3629
3630 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3631 zc.zc_cookie = replacing;
3632 zc.zc_simple = rebuild;
3633
3634 if (rebuild &&
3635 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
3636 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3637 "the loaded zfs module doesn't support device rebuilds"));
3638 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3639 }
3640
3641 type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);
3642 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&
3643 zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {
3644 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3645 "the loaded zfs module doesn't support raidz expansion"));
3646 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3647 }
3648
3649 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3650 &child, &children) != 0 || children != 1) {
3651 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3652 "new device must be a single disk"));
3653 return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
3654 }
3655
3656 config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3657 ZPOOL_CONFIG_VDEV_TREE);
3658
3659 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3660 return (-1);
3661
3662 /*
3663 * If the target is a hot spare that has been swapped in, we can only
3664 * replace it with another hot spare.
3665 */
3666 if (replacing &&
3667 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3668 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3669 NULL) == NULL || !avail_spare) &&
3670 is_replacing_spare(config_root, tgt, 1)) {
3671 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3672 "can only be replaced by another hot spare"));
3673 free(newname);
3674 return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
3675 }
3676
3677 free(newname);
3678
3679 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
3680
3681 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3682
3683 zcmd_free_nvlists(&zc);
3684
3685 if (ret == 0)
3686 return (0);
3687
3688 switch (errno) {
3689 case ENOTSUP:
3690 /*
3691 * Can't attach to or replace this type of vdev.
3692 */
3693 if (replacing) {
3694 uint64_t version = zpool_get_prop_int(zhp,
3695 ZPOOL_PROP_VERSION, NULL);
3696
3697 if (islog) {
3698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3699 "cannot replace a log with a spare"));
3700 } else if (rebuild) {
3701 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3702 "only mirror and dRAID vdevs support "
3703 "sequential reconstruction"));
3704 } else if (zpool_is_draid_spare(new_disk)) {
3705 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3706 "dRAID spares can only replace child "
3707 "devices in their parent's dRAID vdev"));
3708 } else if (version >= SPA_VERSION_MULTI_REPLACE) {
3709 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3710 "already in replacing/spare config; wait "
3711 "for completion or use 'zpool detach'"));
3712 } else {
3713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3714 "cannot replace a replacing device"));
3715 }
3716 } else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3717 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3718 "raidz_expansion feature must be enabled "
3719 "in order to attach a device to raidz"));
3720 } else {
3721 char status[64] = {0};
3722 zpool_prop_get_feature(zhp,
3723 "feature@device_rebuild", status, 63);
3724 if (rebuild &&
3725 strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
3726 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3727 "device_rebuild feature must be enabled "
3728 "in order to use sequential "
3729 "reconstruction"));
3730 } else {
3731 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3732 "can only attach to mirrors and top-level "
3733 "disks"));
3734 }
3735 }
3736 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3737 break;
3738
3739 case EINVAL:
3740 /*
3741 * The new device must be a single disk.
3742 */
3743 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3744 "new device must be a single disk"));
3745 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3746 break;
3747
3748 case EBUSY:
3749 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
3750 new_disk);
3751 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3752 break;
3753
3754 case EOVERFLOW:
3755 /*
3756 * The new device is too small.
3757 */
3758 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3759 "device is too small"));
3760 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3761 break;
3762
3763 case EDOM:
3764 /*
3765 * The new device has a different optimal sector size.
3766 */
3767 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3768 "new device has a different optimal sector size; use the "
3769 "option '-o ashift=N' to override the optimal size"));
3770 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3771 break;
3772
3773 case ENAMETOOLONG:
3774 /*
3775 * The resulting top-level vdev spec won't fit in the label.
3776 */
3777 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3778 break;
3779
3780 case ENXIO:
3781 /*
3782 * The existing raidz vdev has offline children
3783 */
3784 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3785 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3786 "raidz vdev has devices that are are offline or "
3787 "being replaced"));
3788 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3789 break;
3790 } else {
3791 (void) zpool_standard_error(hdl, errno, errbuf);
3792 }
3793 break;
3794
3795 case EADDRINUSE:
3796 /*
3797 * The boot reserved area is already being used (FreeBSD)
3798 */
3799 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3801 "the reserved boot area needed for the expansion "
3802 "is already being used by a boot loader"));
3803 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3804 } else {
3805 (void) zpool_standard_error(hdl, errno, errbuf);
3806 }
3807 break;
3808
3809 case ZFS_ERR_ASHIFT_MISMATCH:
3810 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3811 "The new device cannot have a higher alignment requirement "
3812 "than the top-level vdev."));
3813 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3814 break;
3815 default:
3816 (void) zpool_standard_error(hdl, errno, errbuf);
3817 }
3818
3819 return (-1);
3820 }
3821
3822 /*
3823 * Detach the specified device.
3824 */
3825 int
3826 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3827 {
3828 zfs_cmd_t zc = {"\0"};
3829 char errbuf[ERRBUFLEN];
3830 nvlist_t *tgt;
3831 boolean_t avail_spare, l2cache;
3832 libzfs_handle_t *hdl = zhp->zpool_hdl;
3833
3834 (void) snprintf(errbuf, sizeof (errbuf),
3835 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3836
3837 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3838 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3839 NULL)) == NULL)
3840 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3841
3842 if (avail_spare)
3843 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3844
3845 if (l2cache)
3846 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
3847
3848 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3849
3850 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3851 return (0);
3852
3853 switch (errno) {
3854
3855 case ENOTSUP:
3856 /*
3857 * Can't detach from this type of vdev.
3858 */
3859 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3860 "applicable to mirror and replacing vdevs"));
3861 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3862 break;
3863
3864 case EBUSY:
3865 /*
3866 * There are no other replicas of this device.
3867 */
3868 (void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
3869 break;
3870
3871 default:
3872 (void) zpool_standard_error(hdl, errno, errbuf);
3873 }
3874
3875 return (-1);
3876 }
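/*
 * Usage sketch (illustrative only; the pool and device names below are
 * hypothetical):
 *
 *     zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *     if (zhp != NULL) {
 *             if (zpool_vdev_detach(zhp, "/dev/sdb") != 0)
 *                     (void) fprintf(stderr, "%s\n",
 *                         libzfs_error_description(hdl));
 *             zpool_close(zhp);
 *     }
 */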
3877
3878 /*
3879 * Find a mirror vdev in the source nvlist.
3880 *
3881 * The mchild array contains a list of disks in one of the top-level mirrors
3882 * of the source pool. The schild array contains a list of disks that the
3883 * user specified on the command line. We loop over the mchild array to
3884 * see if any entry in the schild array matches.
3885 *
3886 * If a disk in the mchild array is found in the schild array, we return
3887 * the index of that entry. Otherwise we return -1.
3888 */
3889 static int
3890 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3891 nvlist_t **schild, uint_t schildren)
3892 {
3893 uint_t mc;
3894
3895 for (mc = 0; mc < mchildren; mc++) {
3896 uint_t sc;
3897 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3898 mchild[mc], 0);
3899
3900 for (sc = 0; sc < schildren; sc++) {
3901 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3902 schild[sc], 0);
3903 boolean_t result = (strcmp(mpath, spath) == 0);
3904
3905 free(spath);
3906 if (result) {
3907 free(mpath);
3908 return (mc);
3909 }
3910 }
3911
3912 free(mpath);
3913 }
3914
3915 return (-1);
3916 }
3917
3918 /*
3919 * Split a mirror pool. If '*newroot' is NULL, a new nvlist is generated
3920 * and it is the caller's responsibility to free it.
3921 */
3922 int
3923 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3924 nvlist_t *props, splitflags_t flags)
3925 {
3926 zfs_cmd_t zc = {"\0"};
3927 char errbuf[ERRBUFLEN];
3928 const char *bias;
3929 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3930 nvlist_t **varray = NULL, *zc_props = NULL;
3931 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3932 libzfs_handle_t *hdl = zhp->zpool_hdl;
3933 uint64_t vers, readonly = B_FALSE;
3934 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3935 int retval = 0;
3936
3937 (void) snprintf(errbuf, sizeof (errbuf),
3938 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3939
3940 if (!zpool_name_valid(hdl, B_FALSE, newname))
3941 return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
3942
3943 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3944 (void) fprintf(stderr, gettext("Internal error: unable to "
3945 "retrieve pool configuration\n"));
3946 return (-1);
3947 }
3948
3949 tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
3950 vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3951
3952 if (props) {
3953 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3954 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3955 props, vers, flags, errbuf)) == NULL)
3956 return (-1);
3957 (void) nvlist_lookup_uint64(zc_props,
3958 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3959 if (readonly) {
3960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3961 "property %s can only be set at import time"),
3962 zpool_prop_to_name(ZPOOL_PROP_READONLY));
3963 return (-1);
3964 }
3965 }
3966
3967 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3968 &children) != 0) {
3969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3970 "Source pool is missing vdev tree"));
3971 nvlist_free(zc_props);
3972 return (-1);
3973 }
3974
3975 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3976 vcount = 0;
3977
3978 if (*newroot == NULL ||
3979 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3980 &newchild, &newchildren) != 0)
3981 newchildren = 0;
3982
3983 for (c = 0; c < children; c++) {
3984 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3985 boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
3986 const char *type;
3987 nvlist_t **mchild, *vdev;
3988 uint_t mchildren;
3989 int entry;
3990
3991 /*
3992 * Unlike cache & spares, slogs are stored in the
3993 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3994 */
3995 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3996 &is_log);
3997 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3998 &is_hole);
3999 if (is_log || is_hole) {
4000 /*
4001 * Create a hole vdev and put it in the config.
4002 */
4003 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
4004 goto out;
4005 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
4006 VDEV_TYPE_HOLE) != 0)
4007 goto out;
4008 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
4009 1) != 0)
4010 goto out;
4011 if (lastlog == 0)
4012 lastlog = vcount;
4013 varray[vcount++] = vdev;
4014 continue;
4015 }
4016 lastlog = 0;
4017 type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);
4018
4019 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
4020 vdev = child[c];
4021 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
4022 goto out;
4023 continue;
4024 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
4025 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4026 "Source pool must be composed only of mirrors\n"));
4027 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4028 goto out;
4029 }
4030
4031 if (nvlist_lookup_string(child[c],
4032 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
4033 if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
4034 is_special = B_TRUE;
4035 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
4036 is_dedup = B_TRUE;
4037 }
4038 verify(nvlist_lookup_nvlist_array(child[c],
4039 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
4040
4041 /* find or add an entry for this top-level vdev */
4042 if (newchildren > 0 &&
4043 (entry = find_vdev_entry(zhp, mchild, mchildren,
4044 newchild, newchildren)) >= 0) {
4045 /* We found a disk that the user specified. */
4046 vdev = mchild[entry];
4047 ++found;
4048 } else {
4049 /* User didn't specify a disk for this vdev. */
4050 vdev = mchild[mchildren - 1];
4051 }
4052
4053 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
4054 goto out;
4055
4056 if (flags.dryrun != 0) {
4057 if (is_dedup == B_TRUE) {
4058 if (nvlist_add_string(varray[vcount - 1],
4059 ZPOOL_CONFIG_ALLOCATION_BIAS,
4060 VDEV_ALLOC_BIAS_DEDUP) != 0)
4061 goto out;
4062 } else if (is_special == B_TRUE) {
4063 if (nvlist_add_string(varray[vcount - 1],
4064 ZPOOL_CONFIG_ALLOCATION_BIAS,
4065 VDEV_ALLOC_BIAS_SPECIAL) != 0)
4066 goto out;
4067 }
4068 }
4069 }
4070
4071 /* did we find every disk the user specified? */
4072 if (found != newchildren) {
4073 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
4074 "include at most one disk from each mirror"));
4075 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4076 goto out;
4077 }
4078
4079 /* Prepare the nvlist for populating. */
4080 if (*newroot == NULL) {
4081 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
4082 goto out;
4083 freelist = B_TRUE;
4084 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
4085 VDEV_TYPE_ROOT) != 0)
4086 goto out;
4087 } else {
4088 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
4089 }
4090
4091 /* Add all the children we found */
4092 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
4093 (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
4094 goto out;
4095
4096 /*
4097 * If we're just doing a dry run, exit now with success.
4098 */
4099 if (flags.dryrun) {
4100 memory_err = B_FALSE;
4101 freelist = B_FALSE;
4102 goto out;
4103 }
4104
4105 /* now build up the config list & call the ioctl */
4106 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
4107 goto out;
4108
4109 if (nvlist_add_nvlist(newconfig,
4110 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
4111 nvlist_add_string(newconfig,
4112 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
4113 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
4114 goto out;
4115
4116 /*
4117 * The new pool is automatically part of the namespace unless we
4118 * explicitly export it.
4119 */
4120 if (!flags.import)
4121 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
4122 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4123 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
4124 zcmd_write_conf_nvlist(hdl, &zc, newconfig);
4125 if (zc_props != NULL)
4126 zcmd_write_src_nvlist(hdl, &zc, zc_props);
4127
4128 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
4129 retval = zpool_standard_error(hdl, errno, errbuf);
4130 goto out;
4131 }
4132
4133 freelist = B_FALSE;
4134 memory_err = B_FALSE;
4135
4136 out:
4137 if (varray != NULL) {
4138 int v;
4139
4140 for (v = 0; v < vcount; v++)
4141 nvlist_free(varray[v]);
4142 free(varray);
4143 }
4144 zcmd_free_nvlists(&zc);
4145 nvlist_free(zc_props);
4146 nvlist_free(newconfig);
4147 if (freelist) {
4148 nvlist_free(*newroot);
4149 *newroot = NULL;
4150 }
4151
4152 if (retval != 0)
4153 return (retval);
4154
4155 if (memory_err)
4156 return (no_memory(hdl));
4157
4158 return (0);
4159 }
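/*
 * Usage sketch (illustrative; "tank2" is hypothetical and error handling
 * is elided): a dry-run split that lets the library pick the last disk
 * of each top-level mirror.
 *
 *     splitflags_t flags = { 0 };
 *     nvlist_t *newroot = NULL;
 *
 *     flags.dryrun = 1;
 *     if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0)
 *             nvlist_free(newroot);      the caller owns the new tree
 */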
4160
4161 /*
4162 * Remove the given device.
4163 */
4164 int
4165 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
4166 {
4167 zfs_cmd_t zc = {"\0"};
4168 char errbuf[ERRBUFLEN];
4169 nvlist_t *tgt;
4170 boolean_t avail_spare, l2cache, islog;
4171 libzfs_handle_t *hdl = zhp->zpool_hdl;
4172 uint64_t version;
4173
4174 (void) snprintf(errbuf, sizeof (errbuf),
4175 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
4176
4177 if (zpool_is_draid_spare(path)) {
4178 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4179 "dRAID spares cannot be removed"));
4180 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4181 }
4182
4183 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4184 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
4185 &islog)) == NULL)
4186 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4187
4188 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
4189 if (islog && version < SPA_VERSION_HOLES) {
4190 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4191 "pool must be upgraded to support log removal"));
4192 return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
4193 }
4194
4195 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
4196
4197 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
4198 return (0);
4199
4200 switch (errno) {
4201
4202 case EALREADY:
4203 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4204 "removal for this vdev is already in progress."));
4205 (void) zfs_error(hdl, EZFS_BUSY, errbuf);
4206 break;
4207
4208 case EINVAL:
4209 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4210 "invalid config; all top-level vdevs must "
4211 "have the same sector size and not be raidz."));
4212 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4213 break;
4214
4215 case EBUSY:
4216 if (islog) {
4217 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4218 "Mount encrypted datasets to replay logs."));
4219 } else {
4220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4221 "Pool busy; removal may already be in progress"));
4222 }
4223 (void) zfs_error(hdl, EZFS_BUSY, errbuf);
4224 break;
4225
4226 case EACCES:
4227 if (islog) {
4228 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4229 "Mount encrypted datasets to replay logs."));
4230 (void) zfs_error(hdl, EZFS_BUSY, errbuf);
4231 } else {
4232 (void) zpool_standard_error(hdl, errno, errbuf);
4233 }
4234 break;
4235
4236 default:
4237 (void) zpool_standard_error(hdl, errno, errbuf);
4238 }
4239 return (-1);
4240 }
4241
4242 int
4243 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
4244 {
4245 zfs_cmd_t zc = {"\0"};
4246 char errbuf[ERRBUFLEN];
4247 libzfs_handle_t *hdl = zhp->zpool_hdl;
4248
4249 (void) snprintf(errbuf, sizeof (errbuf),
4250 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
4251
4252 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4253 zc.zc_cookie = 1;
4254
4255 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
4256 return (0);
4257
4258 return (zpool_standard_error(hdl, errno, errbuf));
4259 }
4260
4261 int
4262 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
4263 uint64_t *sizep)
4264 {
4265 char errbuf[ERRBUFLEN];
4266 nvlist_t *tgt;
4267 boolean_t avail_spare, l2cache, islog;
4268 libzfs_handle_t *hdl = zhp->zpool_hdl;
4269
4270 (void) snprintf(errbuf, sizeof (errbuf),
4271 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
4272 path);
4273
4274 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
4275 &islog)) == NULL)
4276 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4277
4278 if (avail_spare || l2cache || islog) {
4279 *sizep = 0;
4280 return (0);
4281 }
4282
4283 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
4284 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4285 "indirect size not available"));
4286 return (zfs_error(hdl, EINVAL, errbuf));
4287 }
4288 return (0);
4289 }
4290
4291 /*
4292 * Clear the errors for the pool, or the particular device if specified.
4293 */
4294 int
4295 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
4296 {
4297 zfs_cmd_t zc = {"\0"};
4298 char errbuf[ERRBUFLEN];
4299 nvlist_t *tgt;
4300 zpool_load_policy_t policy;
4301 boolean_t avail_spare, l2cache;
4302 libzfs_handle_t *hdl = zhp->zpool_hdl;
4303 nvlist_t *nvi = NULL;
4304 int error;
4305
4306 if (path)
4307 (void) snprintf(errbuf, sizeof (errbuf),
4308 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
4309 path);
4310 else
4311 (void) snprintf(errbuf, sizeof (errbuf),
4312 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
4313 zhp->zpool_name);
4314
4315 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4316 if (path) {
4317 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
4318 &l2cache, NULL)) == NULL)
4319 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4320
4321 /*
4322 * Don't allow error clearing for hot spares. Do allow
4323 * error clearing for l2cache devices.
4324 */
4325 if (avail_spare)
4326 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
4327
4328 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
4329 }
4330
4331 zpool_get_load_policy(rewindnvl, &policy);
4332 zc.zc_cookie = policy.zlp_rewind;
4333
4334 zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
4335 zcmd_write_src_nvlist(hdl, &zc, rewindnvl);
4336
4337 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
4338 errno == ENOMEM)
4339 zcmd_expand_dst_nvlist(hdl, &zc);
4340
4341 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
4342 errno != EPERM && errno != EACCES)) {
4343 if (policy.zlp_rewind &
4344 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
4345 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
4346 zpool_rewind_exclaim(hdl, zc.zc_name,
4347 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
4348 nvi);
4349 nvlist_free(nvi);
4350 }
4351 zcmd_free_nvlists(&zc);
4352 return (0);
4353 }
4354
4355 zcmd_free_nvlists(&zc);
4356 return (zpool_standard_error(hdl, errno, errbuf));
4357 }
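/*
 * Usage sketch (illustrative; the device name is hypothetical): clear
 * error counts for one device using the default (no-rewind) load policy
 * conveyed by an empty nvlist.
 *
 *     nvlist_t *policy = fnvlist_alloc();
 *     (void) zpool_clear(zhp, "/dev/sdb", policy);
 *     nvlist_free(policy);
 */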
4358
4359 /*
4360 * Similar to zpool_clear(), but takes a GUID (used by fmd).
4361 */
4362 int
4363 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
4364 {
4365 zfs_cmd_t zc = {"\0"};
4366 char errbuf[ERRBUFLEN];
4367 libzfs_handle_t *hdl = zhp->zpool_hdl;
4368
4369 (void) snprintf(errbuf, sizeof (errbuf),
4370 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
4371 (u_longlong_t)guid);
4372
4373 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4374 zc.zc_guid = guid;
4375 zc.zc_cookie = ZPOOL_NO_REWIND;
4376
4377 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
4378 return (0);
4379
4380 return (zpool_standard_error(hdl, errno, errbuf));
4381 }
4382
4383 /*
4384 * Change the GUID for a pool.
4385 *
4386 * Similar to zpool_reguid(), but may take a GUID.
4387 *
4388 * If the guid argument is NULL, then no GUID is passed in the nvlist to the
4389 * ioctl().
4390 */
4391 int
4392 zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid)
4393 {
4394 char errbuf[ERRBUFLEN];
4395 libzfs_handle_t *hdl = zhp->zpool_hdl;
4396 nvlist_t *nvl = NULL;
4397 zfs_cmd_t zc = {"\0"};
4398 int error;
4399
4400 if (guid != NULL) {
4401 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
4402 return (no_memory(hdl));
4403
4404 if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) {
4405 nvlist_free(nvl);
4406 return (no_memory(hdl));
4407 }
4408
4409 zcmd_write_src_nvlist(hdl, &zc, nvl);
4410 }
4411
4412 (void) snprintf(errbuf, sizeof (errbuf),
4413 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
4414
4415 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4416 error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc);
4417 /* free the request nvlist on success and failure alike */
4418 if (guid != NULL) {
4419 zcmd_free_nvlists(&zc);
4420 nvlist_free(nvl);
4421 }
4422 if (error)
4423 return (zpool_standard_error(hdl, errno, errbuf));
4424 return (0);
4425 }
4426
4427 /*
4428 * Change the GUID for a pool.
4429 */
4430 int
4431 zpool_reguid(zpool_handle_t *zhp)
4432 {
4433 return (zpool_set_guid(zhp, NULL));
4434 }
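/*
 * Usage sketch (illustrative; the literal GUID is hypothetical):
 *
 *     uint64_t guid = 0x123456789abcdefULL;
 *
 *     (void) zpool_reguid(zhp);               kernel picks a random GUID
 *     (void) zpool_set_guid(zhp, &guid);      caller-chosen GUID
 */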
4435
4436 /*
4437 * Reopen the pool.
4438 */
4439 int
4440 zpool_reopen_one(zpool_handle_t *zhp, void *data)
4441 {
4442 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4443 const char *pool_name = zpool_get_name(zhp);
4444 boolean_t *scrub_restart = data;
4445 int error;
4446
4447 error = lzc_reopen(pool_name, *scrub_restart);
4448 if (error) {
4449 return (zpool_standard_error_fmt(hdl, error,
4450 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
4451 }
4452
4453 return (0);
4454 }
4455
4456 /* call into libzfs_core to execute the sync IOCTL per pool */
4457 int
4458 zpool_sync_one(zpool_handle_t *zhp, void *data)
4459 {
4460 int ret;
4461 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4462 const char *pool_name = zpool_get_name(zhp);
4463 boolean_t *force = data;
4464 nvlist_t *innvl = fnvlist_alloc();
4465
4466 fnvlist_add_boolean_value(innvl, "force", *force);
4467 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
4468 nvlist_free(innvl);
4469 return (zpool_standard_error_fmt(hdl, ret,
4470 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
4471 }
4472 nvlist_free(innvl);
4473
4474 return (0);
4475 }
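/*
 * Both zpool_reopen_one() and zpool_sync_one() match the zpool_iter_f
 * callback signature, so they are normally driven by zpool_iter().
 * Illustrative sketch:
 *
 *     boolean_t force = B_FALSE;
 *     (void) zpool_iter(hdl, zpool_sync_one, &force);
 */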
4476
4477 #define PATH_BUF_LEN 64
4478
4479 /*
4480 * Given a vdev, return the name to display in iostat. If the vdev has a path,
4481 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
4482 * We also check if this is a whole disk, in which case we strip off the
4483 * trailing 's0' slice name.
4484 *
4485 * This routine is also responsible for identifying when disks have been
4486 * reconfigured in a new location. The kernel will have opened the device by
4487 * devid, but the path will still refer to the old location. To catch this, we
4488 * first do a path -> devid translation (which is fast for the common case). If
4489 * the devid matches, we're done. If not, we do a reverse devid -> path
4490 * translation and issue the appropriate ioctl() to update the path of the vdev.
4491 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
4492 * of these checks.
4493 */
4494 char *
4495 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
4496 int name_flags)
4497 {
4498 const char *type, *tpath;
4499 const char *path;
4500 uint64_t value;
4501 char buf[PATH_BUF_LEN];
4502 char tmpbuf[PATH_BUF_LEN * 2];
4503
4504 /*
4505 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
4506 * zpool name that will be displayed to the user.
4507 */
4508 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
4509 if (zhp != NULL && strcmp(type, "root") == 0)
4510 return (zfs_strdup(hdl, zpool_get_name(zhp)));
4511
4512 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
4513 name_flags |= VDEV_NAME_PATH;
4514 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
4515 name_flags |= VDEV_NAME_GUID;
4516 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
4517 name_flags |= VDEV_NAME_FOLLOW_LINKS;
4518
4519 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
4520 name_flags & VDEV_NAME_GUID) {
4521 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
4522 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
4523 path = buf;
4524 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
4525 path = tpath;
4526
4527 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
4528 char *rp = realpath(path, NULL);
4529 if (rp) {
4530 strlcpy(buf, rp, sizeof (buf));
4531 path = buf;
4532 free(rp);
4533 }
4534 }
4535
4536 /*
4537 * For a block device only use the name.
4538 */
4539 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
4540 !(name_flags & VDEV_NAME_PATH)) {
4541 path = zfs_strip_path(path);
4542 }
4543
4544 /*
4545 * Remove the partition from the path if this is a whole disk.
4546 */
4547 if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
4548 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
4549 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
4550 return (zfs_strip_partition(path));
4551 }
4552 } else {
4553 path = type;
4554
4555 /*
4556 * If it's a raidz device, we need to stick in the parity level.
4557 */
4558 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
4559 value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
4560 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
4561 (u_longlong_t)value);
4562 path = buf;
4563 }
4564
4565 /*
4566 * If it's a dRAID device, we add parity, groups, and spares.
4567 */
4568 if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
4569 uint64_t ndata, nparity, nspares;
4570 nvlist_t **child;
4571 uint_t children;
4572
4573 verify(nvlist_lookup_nvlist_array(nv,
4574 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
4575 nparity = fnvlist_lookup_uint64(nv,
4576 ZPOOL_CONFIG_NPARITY);
4577 ndata = fnvlist_lookup_uint64(nv,
4578 ZPOOL_CONFIG_DRAID_NDATA);
4579 nspares = fnvlist_lookup_uint64(nv,
4580 ZPOOL_CONFIG_DRAID_NSPARES);
4581
4582 path = zpool_draid_name(buf, sizeof (buf), ndata,
4583 nparity, nspares, children);
4584 }
4585
4586 /*
4587 * We identify each top-level vdev by using a <type-id>
4588 * naming convention.
4589 */
4590 if (name_flags & VDEV_NAME_TYPE_ID) {
4591 uint64_t id = fnvlist_lookup_uint64(nv,
4592 ZPOOL_CONFIG_ID);
4593 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
4594 path, (u_longlong_t)id);
4595 path = tmpbuf;
4596 }
4597 }
4598
4599 return (zfs_strdup(hdl, path));
4600 }
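/*
 * Examples of the resulting names (illustrative): a disk vdev at
 * /dev/sda1 marked whole-disk is rendered "sda"; a top-level raidz2
 * vdev shown with VDEV_NAME_TYPE_ID becomes "raidz2-0"; a dRAID vdev
 * is rendered by zpool_draid_name(), e.g. "draid2:8d:11c:1s-0".
 */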
4601
4602 static int
4603 zbookmark_mem_compare(const void *a, const void *b)
4604 {
4605 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
4606 }
4607
4608 void
4609 zpool_add_propname(zpool_handle_t *zhp, const char *propname)
4610 {
4611 assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES);
4612 zhp->zpool_propnames[zhp->zpool_n_propnames] = propname;
4613 zhp->zpool_n_propnames++;
4614 }
4615
4616 /*
4617 * Retrieve the persistent error log, uniquify the members, and return to the
4618 * caller.
4619 */
4620 int
4621 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
4622 {
4623 zfs_cmd_t zc = {"\0"};
4624 libzfs_handle_t *hdl = zhp->zpool_hdl;
4625 zbookmark_phys_t *buf;
4626 uint64_t buflen = 10000; /* approx. 1MB of RAM */
4627
4628 if (fnvlist_lookup_uint64(zhp->zpool_config,
4629 ZPOOL_CONFIG_ERRCOUNT) == 0)
4630 return (0);
4631
4632 /*
4633 * Retrieve the raw error list from the kernel. If it doesn't fit,
4634 * allocate a larger buffer and retry.
4635 */
4636 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4637 for (;;) {
4638 buf = zfs_alloc(zhp->zpool_hdl,
4639 buflen * sizeof (zbookmark_phys_t));
4640 zc.zc_nvlist_dst = (uintptr_t)buf;
4641 zc.zc_nvlist_dst_size = buflen;
4642 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
4643 &zc) != 0) {
4644 free(buf);
4645 if (errno == ENOMEM) {
4646 buflen *= 2;
4647 } else {
4648 return (zpool_standard_error_fmt(hdl, errno,
4649 dgettext(TEXT_DOMAIN, "errors: List of "
4650 "errors unavailable")));
4651 }
4652 } else {
4653 break;
4654 }
4655 }
4656
4657 /*
4658 * Sort the resulting bookmarks. This is a little confusing due to the
4659 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
4660 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
4661 * _not_ copied as part of the process. So we point the start of our
4662 * array appropriately and decrement the total number of elements.
4663 */
4664 zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
4665 uint64_t zblen = buflen - zc.zc_nvlist_dst_size;
4666
4667 qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
4668
4669 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
4670
4671 /*
4672 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
4673 */
4674 for (uint64_t i = 0; i < zblen; i++) {
4675 nvlist_t *nv;
4676
4677 /* ignoring zb_blkid and zb_level for now */
4678 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
4679 zb[i-1].zb_object == zb[i].zb_object)
4680 continue;
4681
4682 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
4683 goto nomem;
4684 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
4685 zb[i].zb_objset) != 0) {
4686 nvlist_free(nv);
4687 goto nomem;
4688 }
4689 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
4690 zb[i].zb_object) != 0) {
4691 nvlist_free(nv);
4692 goto nomem;
4693 }
4694 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
4695 nvlist_free(nv);
4696 goto nomem;
4697 }
4698 nvlist_free(nv);
4699 }
4700
4701 free(buf);
4702 return (0);
4703
4704 nomem:
4705 free(buf);
4706 return (no_memory(zhp->zpool_hdl));
4707 }
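/*
 * Usage sketch (illustrative): walking the uniquified error list. Each
 * element is an nvlist carrying ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT.
 *
 *     nvlist_t *nverrlist = NULL;
 *     if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *             nvpair_t *elem = NULL;
 *             while ((elem = nvlist_next_nvpair(nverrlist, elem))
 *                 != NULL) {
 *                     nvlist_t *nv = fnvpair_value_nvlist(elem);
 *                     uint64_t ds = fnvlist_lookup_uint64(nv,
 *                         ZPOOL_ERR_DATASET);
 *                     ...
 *             }
 *             nvlist_free(nverrlist);
 *     }
 */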
4708
4709 /*
4710 * Upgrade a ZFS pool to the latest on-disk version.
4711 */
4712 int
4713 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4714 {
4715 zfs_cmd_t zc = {"\0"};
4716 libzfs_handle_t *hdl = zhp->zpool_hdl;
4717
4718 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4719 zc.zc_cookie = new_version;
4720
4721 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4722 return (zpool_standard_error_fmt(hdl, errno,
4723 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4724 zhp->zpool_name));
4725 return (0);
4726 }
4727
4728 void
4729 zfs_save_arguments(int argc, char **argv, char *string, int len)
4730 {
4731 int i;
4732
4733 (void) strlcpy(string, zfs_basename(argv[0]), len);
4734 for (i = 1; i < argc; i++) {
4735 (void) strlcat(string, " ", len);
4736 (void) strlcat(string, argv[i], len);
4737 }
4738 }
4739
4740 int
4741 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4742 {
4743 zfs_cmd_t zc = {"\0"};
4744 nvlist_t *args;
4745
4746 args = fnvlist_alloc();
4747 fnvlist_add_string(args, "message", message);
4748 zcmd_write_src_nvlist(hdl, &zc, args);
4749 int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4750 nvlist_free(args);
4751 zcmd_free_nvlists(&zc);
4752 return (err);
4753 }
4754
4755 /*
4756 * Perform ioctl to get some command history of a pool.
4757 *
4758 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
4759 * logical offset of the history buffer to start reading from.
4760 *
4761 * Upon return, 'off' is the next logical offset to read from and
4762 * 'len' is the actual amount of bytes read into 'buf'.
4763 */
4764 static int
4765 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
4766 {
4767 zfs_cmd_t zc = {"\0"};
4768 libzfs_handle_t *hdl = zhp->zpool_hdl;
4769
4770 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4771
4772 zc.zc_history = (uint64_t)(uintptr_t)buf;
4773 zc.zc_history_len = *len;
4774 zc.zc_history_offset = *off;
4775
4776 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
4777 switch (errno) {
4778 case EPERM:
4779 return (zfs_error_fmt(hdl, EZFS_PERM,
4780 dgettext(TEXT_DOMAIN,
4781 "cannot show history for pool '%s'"),
4782 zhp->zpool_name));
4783 case ENOENT:
4784 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
4785 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4786 "'%s'"), zhp->zpool_name));
4787 case ENOTSUP:
4788 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
4789 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4790 "'%s', pool must be upgraded"), zhp->zpool_name));
4791 default:
4792 return (zpool_standard_error_fmt(hdl, errno,
4793 dgettext(TEXT_DOMAIN,
4794 "cannot get history for '%s'"), zhp->zpool_name));
4795 }
4796 }
4797
4798 *len = zc.zc_history_len;
4799 *off = zc.zc_history_offset;
4800
4801 return (0);
4802 }
4803
4804 /*
4805 * Retrieve the command history of a pool.
4806 */
4807 int
4808 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
4809 boolean_t *eof)
4810 {
4811 libzfs_handle_t *hdl = zhp->zpool_hdl;
4812 char *buf;
4813 int buflen = 128 * 1024;
4814 nvlist_t **records = NULL;
4815 uint_t numrecords = 0;
4816 int err = 0, i;
4817 uint64_t start = *off;
4818
4819 buf = zfs_alloc(hdl, buflen);
4820
4821 /* process about 1MiB at a time */
4822 while (*off - start < 1024 * 1024) {
4823 uint64_t bytes_read = buflen;
4824 uint64_t leftover;
4825
4826 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
4827 break;
4828
4829 /* if nothing else was read in, we're at EOF, just return */
4830 if (!bytes_read) {
4831 *eof = B_TRUE;
4832 break;
4833 }
4834
4835 if ((err = zpool_history_unpack(buf, bytes_read,
4836 &leftover, &records, &numrecords)) != 0) {
4837 zpool_standard_error_fmt(hdl, err,
4838 dgettext(TEXT_DOMAIN,
4839 "cannot get history for '%s'"), zhp->zpool_name);
4840 break;
4841 }
4842 *off -= leftover;
4843 if (leftover == bytes_read) {
4844 /*
4845 * no progress made, because buffer is not big enough
4846 * to hold this record; resize and retry.
4847 */
4848 buflen *= 2;
4849 free(buf);
4850 buf = zfs_alloc(hdl, buflen);
4851 }
4852 }
4853
4854 free(buf);
4855
4856 if (!err) {
4857 *nvhisp = fnvlist_alloc();
4858 fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4859 (const nvlist_t **)records, numrecords);
4860 }
4861 for (i = 0; i < numrecords; i++)
4862 nvlist_free(records[i]);
4863 free(records);
4864
4865 return (err);
4866 }
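/*
 * Usage sketch (illustrative): reading the full history in chunks;
 * 'off' carries the resume offset between calls and 'eof' ends the
 * loop.
 *
 *     nvlist_t *nvhis;
 *     uint64_t off = 0;
 *     boolean_t eof = B_FALSE;
 *
 *     while (!eof) {
 *             if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
 *                     break;
 *             ... process the ZPOOL_HIST_RECORD array ...
 *             nvlist_free(nvhis);
 *     }
 */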
4867
4868 /*
4869 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4870 * If there is a new event available 'nvp' will contain a newly allocated
4871 * nvlist and 'dropped' will be set to the number of missed events since
4872 * the last call to this function. When 'nvp' is set to NULL it indicates
4873 * no new events are available. In either case the function returns 0 and
4874 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4875 * function will return a non-zero value. When the function is called in
4876 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4877 * it will not return until a new event is available.
4878 */
4879 int
4880 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4881 int *dropped, unsigned flags, int zevent_fd)
4882 {
4883 zfs_cmd_t zc = {"\0"};
4884 int error = 0;
4885
4886 *nvp = NULL;
4887 *dropped = 0;
4888 zc.zc_cleanup_fd = zevent_fd;
4889
4890 if (flags & ZEVENT_NONBLOCK)
4891 zc.zc_guid = ZEVENT_NONBLOCK;
4892
4893 zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);
4894
4895 retry:
4896 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4897 switch (errno) {
4898 case ESHUTDOWN:
4899 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4900 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4901 goto out;
4902 case ENOENT:
4903 /* Blocking error case should not occur */
4904 if (!(flags & ZEVENT_NONBLOCK))
4905 error = zpool_standard_error_fmt(hdl, errno,
4906 dgettext(TEXT_DOMAIN, "cannot get event"));
4907
4908 goto out;
4909 case ENOMEM:
4910 zcmd_expand_dst_nvlist(hdl, &zc);
4911 goto retry;
4912 default:
4913 error = zpool_standard_error_fmt(hdl, errno,
4914 dgettext(TEXT_DOMAIN, "cannot get event"));
4915 goto out;
4916 }
4917 }
4918
4919 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4920 if (error != 0)
4921 goto out;
4922
4923 *dropped = (int)zc.zc_cookie;
4924 out:
4925 zcmd_free_nvlists(&zc);
4926
4927 return (error);
4928 }
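/*
 * Usage sketch (illustrative): draining all currently queued events
 * without blocking; ZFS_DEV is the zfs control node.
 *
 *     int fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
 *     nvlist_t *event;
 *     int dropped;
 *
 *     while (zpool_events_next(hdl, &event, &dropped,
 *         ZEVENT_NONBLOCK, fd) == 0 && event != NULL) {
 *             ... inspect the event nvlist ...
 *             nvlist_free(event);
 *     }
 *     (void) close(fd);
 */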
4929
4930 /*
4931 * Clear all events.
4932 */
4933 int
4934 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4935 {
4936 zfs_cmd_t zc = {"\0"};
4937
4938 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4939 return (zpool_standard_error(hdl, errno,
4940 dgettext(TEXT_DOMAIN, "cannot clear events")));
4941
4942 if (count != NULL)
4943 *count = (int)zc.zc_cookie; /* # of events cleared */
4944
4945 return (0);
4946 }
4947
4948 /*
4949 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4950 * the passed zevent_fd file handle. On success zero is returned,
4951 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4952 */
4953 int
4954 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4955 {
4956 zfs_cmd_t zc = {"\0"};
4957 int error = 0;
4958
4959 zc.zc_guid = eid;
4960 zc.zc_cleanup_fd = zevent_fd;
4961
4962 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4963 switch (errno) {
4964 case ENOENT:
4965 error = zfs_error_fmt(hdl, EZFS_NOENT,
4966 dgettext(TEXT_DOMAIN, "cannot get event"));
4967 break;
4968
4969 case ENOMEM:
4970 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4971 dgettext(TEXT_DOMAIN, "cannot get event"));
4972 break;
4973
4974 default:
4975 error = zpool_standard_error_fmt(hdl, errno,
4976 dgettext(TEXT_DOMAIN, "cannot get event"));
4977 break;
4978 }
4979 }
4980
4981 return (error);
4982 }
4983
4984 static void
4985 zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4986 char *pathname, size_t len, boolean_t always_unmounted)
4987 {
4988 zfs_cmd_t zc = {"\0"};
4989 boolean_t mounted = B_FALSE;
4990 char *mntpnt = NULL;
4991 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4992
4993 if (dsobj == 0) {
4994 /* special case for the MOS */
4995 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4996 (longlong_t)obj);
4997 return;
4998 }
4999
5000 /* get the dataset's name */
5001 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
5002 zc.zc_obj = dsobj;
5003 if (zfs_ioctl(zhp->zpool_hdl,
5004 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
5005 /* just write out a path of two object numbers */
5006 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
5007 (longlong_t)dsobj, (longlong_t)obj);
5008 return;
5009 }
5010 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
5011
5012 /* find out if the dataset is mounted */
5013 mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
5014 &mntpnt);
5015
5016 /* get the corrupted object's path */
5017 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
5018 zc.zc_obj = obj;
5019 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
5020 &zc) == 0) {
5021 if (mounted) {
5022 (void) snprintf(pathname, len, "%s%s", mntpnt,
5023 zc.zc_value);
5024 } else {
5025 (void) snprintf(pathname, len, "%s:%s",
5026 dsname, zc.zc_value);
5027 }
5028 } else {
5029 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
5030 (longlong_t)obj);
5031 }
5032 free(mntpnt);
5033 }
5034
5035 void
5036 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
5037 char *pathname, size_t len)
5038 {
5039 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
5040 }
5041
5042 void
5043 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
5044 char *pathname, size_t len)
5045 {
5046 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
5047 }
5048 /*
5049 * Wait while the specified activity is in progress in the pool.
5050 */
5051 int
5052 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
5053 {
5054 boolean_t missing;
5055
5056 int error = zpool_wait_status(zhp, activity, &missing, NULL);
5057
5058 if (missing) {
5059 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
5060 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
5061 zhp->zpool_name);
5062 return (ENOENT);
5063 } else {
5064 return (error);
5065 }
5066 }
5067
5068 /*
5069 * Wait for the given activity and return the status of the wait (whether or not
5070 * any waiting was done) in the 'waited' parameter. Non-existent pools are
5071 * reported via the 'missing' parameter, rather than by printing an error
5072 * message. This is convenient when this function is called in a loop over a
5073 * long period of time (as it is, for example, by 'zpool wait'). In that
5074 * scenario, a pool being exported or destroyed should be considered a normal
5075 * event, so we don't want to print an error when we find that the pool doesn't
5076 * exist.
5077 */
5078 int
5079 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
5080 boolean_t *missing, boolean_t *waited)
5081 {
5082 int error = lzc_wait(zhp->zpool_name, activity, waited);
5083 *missing = (error == ENOENT);
5084 if (*missing)
5085 return (0);
5086
5087 if (error != 0) {
5088 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5089 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
5090 zhp->zpool_name);
5091 }
5092
5093 return (error);
5094 }
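/*
 * Usage sketch (illustrative): waiting for a scrub the way 'zpool wait'
 * does, treating a vanished pool as normal termination.
 *
 *     boolean_t missing, waited;
 *
 *     do {
 *             if (zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB,
 *                 &missing, &waited) != 0 || missing)
 *                     break;
 *     } while (waited);
 */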
5095
5096 int
5097 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
5098 {
5099 int error = lzc_set_bootenv(zhp->zpool_name, envmap);
5100 if (error != 0) {
5101 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5102 dgettext(TEXT_DOMAIN,
5103 "error setting bootenv in pool '%s'"), zhp->zpool_name);
5104 }
5105
5106 return (error);
5107 }
5108
5109 int
5110 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
5111 {
5112 nvlist_t *nvl;
5113 int error;
5114
5115 nvl = NULL;
5116 error = lzc_get_bootenv(zhp->zpool_name, &nvl);
5117 if (error != 0) {
5118 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5119 dgettext(TEXT_DOMAIN,
5120 "error getting bootenv in pool '%s'"), zhp->zpool_name);
5121 } else {
5122 *nvlp = nvl;
5123 }
5124
5125 return (error);
5126 }
5127
5128 /*
5129 * Attempt to read and parse feature file(s) (from "compatibility" property).
5130 * Files contain zpool feature names, comma- or whitespace-separated.
5131 * Comments (# character to next newline) are discarded.
5132 *
5133 * Arguments:
5134 * compatibility : string containing feature filenames
5135 * features : either NULL or pointer to array of boolean
5136 * report : either NULL or pointer to string buffer
5137 * rlen : length of "report" buffer
5138 *
5139 * compatibility is NULL (unset), "", "off", "legacy", or list of
5140 * comma-separated filenames. filenames should either be absolute,
5141 * or relative to:
5142 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
5143 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
5144 * (Unset), "" or "off" => enable all features
5145 * "legacy" => disable all features
5146 *
5147 * Any feature names read from files which match unames in spa_feature_table
5148 * will have the corresponding boolean set in the features array (if non-NULL).
5149 * If more than one feature set is specified, only features present in *all* of
5150 * them will be set.
5151 *
5152 * "report" if not NULL will be populated with a suitable status message.
5153 *
5154 * Return values:
5155 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok
5156 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
5157 * ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
5158 * ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
5159 * ZPOOL_COMPATIBILITY_NOFILES : no feature files found
5160 */
5161 zpool_compat_status_t
5162 zpool_load_compat(const char *compat, boolean_t *features, char *report,
5163 size_t rlen)
5164 {
5165 int sdirfd, ddirfd, featfd;
5166 struct stat fs;
5167 char *fc;
5168 char *ps, *ls, *ws;
5169 char *file, *line, *word;
5170
5171 char l_compat[ZFS_MAXPROPLEN];
5172
5173 boolean_t ret_nofiles = B_TRUE;
5174 boolean_t ret_badfile = B_FALSE;
5175 boolean_t ret_badtoken = B_FALSE;
5176 boolean_t ret_warntoken = B_FALSE;
5177
5178 /* special cases (unset), "" and "off" => enable all features */
5179 if (compat == NULL || compat[0] == '\0' ||
5180 strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
5181 if (features != NULL) {
5182 for (uint_t i = 0; i < SPA_FEATURES; i++)
5183 features[i] = B_TRUE;
5184 }
5185 if (report != NULL)
5186 strlcpy(report, gettext("all features enabled"), rlen);
5187 return (ZPOOL_COMPATIBILITY_OK);
5188 }
5189
5190 /* Final special case "legacy" => disable all features */
5191 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
5192 if (features != NULL)
5193 for (uint_t i = 0; i < SPA_FEATURES; i++)
5194 features[i] = B_FALSE;
5195 if (report != NULL)
5196 strlcpy(report, gettext("all features disabled"), rlen);
5197 return (ZPOOL_COMPATIBILITY_OK);
5198 }
5199
5200 /*
5201 * Start with all true; will be ANDed with results from each file
5202 */
5203 if (features != NULL)
5204 for (uint_t i = 0; i < SPA_FEATURES; i++)
5205 features[i] = B_TRUE;
5206
5207 char err_badfile[ZFS_MAXPROPLEN] = "";
5208 char err_badtoken[ZFS_MAXPROPLEN] = "";
5209
5210 /*
5211 * We ignore errors from the directory open()
5212 * as they're only needed if the filename is relative
5213 * which will be checked during the openat().
5214 */
5215
5216 /* O_PATH safer than O_RDONLY if system allows it */
5217 #if defined(O_PATH)
5218 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
5219 #else
5220 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
5221 #endif
5222
5223 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
5224 ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
5225
5226 (void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
5227
5228 for (file = strtok_r(l_compat, ",", &ps);
5229 file != NULL;
5230 file = strtok_r(NULL, ",", &ps)) {
5231
5232 boolean_t l_features[SPA_FEATURES];
5233
5234 enum { Z_SYSCONF, Z_DATA } source;
5235
5236 /* try sysconfdir first, then datadir */
5237 source = Z_SYSCONF;
5238 if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
5239 featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
5240 source = Z_DATA;
5241 }
5242
5243 /* File readable and correct size? */
5244 if (featfd < 0 ||
5245 fstat(featfd, &fs) < 0 ||
5246 fs.st_size < 1 ||
5247 fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
5248 (void) close(featfd);
5249 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
5250 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
5251 ret_badfile = B_TRUE;
5252 continue;
5253 }
5254
5255 /* Prefault the file if system allows */
5256 #if defined(MAP_POPULATE)
5257 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
5258 #elif defined(MAP_PREFAULT_READ)
5259 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
5260 #else
5261 #define ZC_MMAP_FLAGS (MAP_PRIVATE)
5262 #endif
5263
5264 /* private mmap() so we can strtok safely */
5265 fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
5266 ZC_MMAP_FLAGS, featfd, 0);
5267 (void) close(featfd);
5268
5269 /* map ok, and last character == newline? */
5270 if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
5271 (void) munmap((void *) fc, fs.st_size);
5272 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
5273 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
5274 ret_badfile = B_TRUE;
5275 continue;
5276 }
5277
5278 ret_nofiles = B_FALSE;
5279
5280 for (uint_t i = 0; i < SPA_FEATURES; i++)
5281 l_features[i] = B_FALSE;
5282
5283 /* replace final newline with a NUL terminator */
5284 fc[fs.st_size - 1] = '\0';
5285
5286 for (line = strtok_r(fc, "\n", &ls);
5287 line != NULL;
5288 line = strtok_r(NULL, "\n", &ls)) {
5289 /* discard comments */
5290 char *r = strchr(line, '#');
5291 if (r != NULL)
5292 *r = '\0';
5293
5294 for (word = strtok_r(line, ", \t", &ws);
5295 word != NULL;
5296 word = strtok_r(NULL, ", \t", &ws)) {
5297 /* Find matching feature name */
5298 uint_t f;
5299 for (f = 0; f < SPA_FEATURES; f++) {
5300 zfeature_info_t *fi =
5301 &spa_feature_table[f];
5302 if (strcmp(word, fi->fi_uname) == 0) {
5303 l_features[f] = B_TRUE;
5304 break;
5305 }
5306 }
5307 if (f < SPA_FEATURES)
5308 continue;
5309
5310 /* found an unrecognized word */
5311 /* lightly sanitize it */
5312 if (strlen(word) > 32)
5313 word[32] = '\0';
5314 for (char *c = word; *c != '\0'; c++)
5315 if (!isprint(*c))
5316 *c = '?';
5317
5318 strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
5319 strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
5320 if (source == Z_SYSCONF)
5321 ret_badtoken = B_TRUE;
5322 else
5323 ret_warntoken = B_TRUE;
5324 }
5325 }
5326 (void) munmap((void *) fc, fs.st_size);
5327
5328 if (features != NULL)
5329 for (uint_t i = 0; i < SPA_FEATURES; i++)
5330 features[i] &= l_features[i];
5331 }
5332 (void) close(sdirfd);
5333 (void) close(ddirfd);
5334
5335 /* Return the most serious error */
5336 if (ret_badfile) {
5337 if (report != NULL)
5338 snprintf(report, rlen, gettext("could not read/"
5339 "parse feature file(s): %s"), err_badfile);
5340 return (ZPOOL_COMPATIBILITY_BADFILE);
5341 }
5342 if (ret_nofiles) {
5343 if (report != NULL)
5344 strlcpy(report,
5345 gettext("no valid compatibility files specified"),
5346 rlen);
5347 return (ZPOOL_COMPATIBILITY_NOFILES);
5348 }
5349 if (ret_badtoken) {
5350 if (report != NULL)
5351 snprintf(report, rlen, gettext("invalid feature "
5352 "name(s) in local compatibility files: %s"),
5353 err_badtoken);
5354 return (ZPOOL_COMPATIBILITY_BADTOKEN);
5355 }
5356 if (ret_warntoken) {
5357 if (report != NULL)
5358 snprintf(report, rlen, gettext("unrecognized feature "
5359 "name(s) in distribution compatibility files: %s"),
5360 err_badtoken);
5361 return (ZPOOL_COMPATIBILITY_WARNTOKEN);
5362 }
5363 if (report != NULL)
5364 strlcpy(report, gettext("compatibility set ok"), rlen);
5365 return (ZPOOL_COMPATIBILITY_OK);
5366 }
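/*
 * Example (illustrative): a file such as
 * /etc/zfs/compatibility.d/mysystem containing
 *
 *     # features supported by my boot environment
 *     async_destroy, bookmarks
 *     lz4_compress
 *
 * could be checked with
 *
 *     boolean_t feat[SPA_FEATURES];
 *     char report[ZFS_MAXPROPLEN];
 *
 *     if (zpool_load_compat("mysystem", feat, report,
 *         sizeof (report)) != ZPOOL_COMPATIBILITY_OK)
 *             (void) fprintf(stderr, "%s\n", report);
 */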
5367
5368 static int
5369 zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
5370 {
5371 nvlist_t *tgt;
5372 boolean_t avail_spare, l2cache;
5373
5374 verify(zhp != NULL);
5375 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
5376 char errbuf[ERRBUFLEN];
5377 (void) snprintf(errbuf, sizeof (errbuf),
5378 dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
5379 return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
5380 }
5381
5382 if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
5383 NULL)) == NULL) {
5384 char errbuf[ERRBUFLEN];
5385 (void) snprintf(errbuf, sizeof (errbuf),
5386 dgettext(TEXT_DOMAIN, "can not find %s in %s"),
5387 vdevname, zhp->zpool_name);
5388 return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
5389 }
5390
5391 *vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
5392 return (0);
5393 }
5394
5395 /*
5396 * Get a vdev property value for 'prop' and return the value in
5397 * a pre-allocated buffer.
5398 */
5399 int
5400 zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
5401 char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
5402 {
5403 nvlist_t *nv;
5404 const char *strval;
5405 uint64_t intval;
5406 zprop_source_t src = ZPROP_SRC_NONE;
5407
5408 if (prop == VDEV_PROP_USERPROP) {
5409 /* user property, prop_name must contain the property name */
5410 assert(prop_name != NULL);
5411 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5412 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5413 strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
5414 } else {
5415 /* user prop not found */
5416 src = ZPROP_SRC_DEFAULT;
5417 strval = "-";
5418 }
5419 (void) strlcpy(buf, strval, len);
5420 if (srctype)
5421 *srctype = src;
5422 return (0);
5423 }
5424
5425 if (prop_name == NULL)
5426 prop_name = (char *)vdev_prop_to_name(prop);
5427
5428 switch (vdev_prop_get_type(prop)) {
5429 case PROP_TYPE_STRING:
5430 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5431 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5432 strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
5433 } else {
5434 src = ZPROP_SRC_DEFAULT;
5435 if ((strval = vdev_prop_default_string(prop)) == NULL)
5436 strval = "-";
5437 }
5438 (void) strlcpy(buf, strval, len);
5439 break;
5440
5441 case PROP_TYPE_NUMBER:
5442 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5443 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5444 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
5445 } else {
5446 src = ZPROP_SRC_DEFAULT;
5447 intval = vdev_prop_default_numeric(prop);
5448 }
5449
5450 switch (prop) {
5451 case VDEV_PROP_ASIZE:
5452 case VDEV_PROP_PSIZE:
5453 case VDEV_PROP_SIZE:
5454 case VDEV_PROP_BOOTSIZE:
5455 case VDEV_PROP_ALLOCATED:
5456 case VDEV_PROP_FREE:
5457 case VDEV_PROP_READ_ERRORS:
5458 case VDEV_PROP_WRITE_ERRORS:
5459 case VDEV_PROP_CHECKSUM_ERRORS:
5460 case VDEV_PROP_INITIALIZE_ERRORS:
5461 case VDEV_PROP_TRIM_ERRORS:
5462 case VDEV_PROP_SLOW_IOS:
5463 case VDEV_PROP_OPS_NULL:
5464 case VDEV_PROP_OPS_READ:
5465 case VDEV_PROP_OPS_WRITE:
5466 case VDEV_PROP_OPS_FREE:
5467 case VDEV_PROP_OPS_CLAIM:
5468 case VDEV_PROP_OPS_TRIM:
5469 case VDEV_PROP_BYTES_NULL:
5470 case VDEV_PROP_BYTES_READ:
5471 case VDEV_PROP_BYTES_WRITE:
5472 case VDEV_PROP_BYTES_FREE:
5473 case VDEV_PROP_BYTES_CLAIM:
5474 case VDEV_PROP_BYTES_TRIM:
5475 if (literal) {
5476 (void) snprintf(buf, len, "%llu",
5477 (u_longlong_t)intval);
5478 } else {
5479 (void) zfs_nicenum(intval, buf, len);
5480 }
5481 break;
5482 case VDEV_PROP_EXPANDSZ:
5483 if (intval == 0) {
5484 (void) strlcpy(buf, "-", len);
5485 } else if (literal) {
5486 (void) snprintf(buf, len, "%llu",
5487 (u_longlong_t)intval);
5488 } else {
5489 (void) zfs_nicenum(intval, buf, len);
5490 }
5491 break;
5492 case VDEV_PROP_CAPACITY:
5493 if (literal) {
5494 (void) snprintf(buf, len, "%llu",
5495 (u_longlong_t)intval);
5496 } else {
5497 (void) snprintf(buf, len, "%llu%%",
5498 (u_longlong_t)intval);
5499 }
5500 break;
5501 case VDEV_PROP_CHECKSUM_N:
5502 case VDEV_PROP_CHECKSUM_T:
5503 case VDEV_PROP_IO_N:
5504 case VDEV_PROP_IO_T:
5505 case VDEV_PROP_SLOW_IO_N:
5506 case VDEV_PROP_SLOW_IO_T:
5507 if (intval == UINT64_MAX) {
5508 (void) strlcpy(buf, "-", len);
5509 } else {
5510 (void) snprintf(buf, len, "%llu",
5511 (u_longlong_t)intval);
5512 }
5513 break;
5514 case VDEV_PROP_FRAGMENTATION:
5515 if (intval == UINT64_MAX) {
5516 (void) strlcpy(buf, "-", len);
5517 } else {
5518 (void) snprintf(buf, len, "%llu%%",
5519 (u_longlong_t)intval);
5520 }
5521 break;
5522 case VDEV_PROP_STATE:
5523 if (literal) {
5524 (void) snprintf(buf, len, "%llu",
5525 (u_longlong_t)intval);
5526 } else {
5527 (void) strlcpy(buf, zpool_state_to_name(intval,
5528 VDEV_AUX_NONE), len);
5529 }
5530 break;
5531 default:
5532 (void) snprintf(buf, len, "%llu",
5533 (u_longlong_t)intval);
5534 }
5535 break;
5536
5537 case PROP_TYPE_INDEX:
5538 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
5539 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
5540 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
5541 } else {
5542 /* 'trim_support' only valid for leaf vdevs */
5543 if (prop == VDEV_PROP_TRIM_SUPPORT) {
5544 (void) strlcpy(buf, "-", len);
5545 break;
5546 }
5547 src = ZPROP_SRC_DEFAULT;
5548 intval = vdev_prop_default_numeric(prop);
5549 /* 'raidz_expanding' is only reported by raidz vdevs themselves */
5550 if (prop == VDEV_PROP_RAIDZ_EXPANDING)
5551 return (ENOENT);
5552 }
5553 if (vdev_prop_index_to_string(prop, intval,
5554 (const char **)&strval) != 0)
5555 return (-1);
5556 (void) strlcpy(buf, strval, len);
5557 break;
5558
5559 default:
5560 abort();
5561 }
5562
5563 if (srctype)
5564 *srctype = src;
5565
5566 return (0);
5567 }
5568
5569 /*
5570 * Get a vdev property value for 'prop_name' and return the value in
5571 * a pre-allocated buffer.
5572 */
5573 int
5574 zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
5575 char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
5576 boolean_t literal)
5577 {
5578 nvlist_t *reqnvl, *reqprops;
5579 nvlist_t *retprops = NULL;
5580 uint64_t vdev_guid = 0;
5581 int ret;
5582
5583 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5584 return (ret);
5585
5586 if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
5587 return (no_memory(zhp->zpool_hdl));
5588 if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0) {
5589 nvlist_free(reqnvl);
5590 return (no_memory(zhp->zpool_hdl));
5591 }
5592 fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
5593 if (prop != VDEV_PROP_USERPROP) {
5594 /* prop_name overrides prop value */
5595 if (prop_name != NULL)
5596 prop = vdev_name_to_prop(prop_name);
5597 else
5598 prop_name = (char *)vdev_prop_to_name(prop);
5599 assert(prop < VDEV_NUM_PROPS);
5600 }
5601
5602 assert(prop_name != NULL);
5603 if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
5604 nvlist_free(reqnvl);
5605 nvlist_free(reqprops);
5606 return (no_memory(zhp->zpool_hdl));
5607 }
5608
5609 fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
5610
5611 ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
5612
5613 if (ret == 0) {
5614 ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
5615 len, srctype, literal);
5616 } else {
5617 char errbuf[ERRBUFLEN];
5618 (void) snprintf(errbuf, sizeof (errbuf),
5619 dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
5620 " %s in %s"), prop_name, vdevname, zhp->zpool_name);
5621 (void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
5622 }
5623
5624 nvlist_free(reqnvl);
5625 nvlist_free(reqprops);
5626 nvlist_free(retprops);
5627
5628 return (ret);
5629 }
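
/*
 * Example (illustrative sketch, not part of the library): read the
 * "state" property of a leaf vdev into a caller-supplied buffer.  The
 * open handle 'zhp' and the vdev name "sda" are assumptions made for
 * the sake of the example.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_vdev_prop(zhp, "sda", VDEV_PROP_STATE, NULL,
 *	    buf, sizeof (buf), &src, B_FALSE) == 0)
 *		(void) printf("vdev state: %s\n", buf);
 */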

/*
 * Get all vdev properties
 */
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
    nvlist_t **outnvl)
{
	nvlist_t *nvl = NULL;
	uint64_t vdev_guid = 0;
	int ret;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);

	ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);

	nvlist_free(nvl);

	if (ret) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
		    " %s in %s"), vdevname, zhp->zpool_name);
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	}

	return (ret);
}
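
/*
 * Example (illustrative sketch): walk every property returned for a
 * vdev.  Each pair in the returned nvlist is itself an nvlist carrying
 * ZPROP_VALUE (and usually ZPROP_SOURCE); the vdev name "sda" is an
 * assumption.
 *
 *	nvlist_t *props = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_all_vdev_props(zhp, "sda", &props) == 0) {
 *		while ((elem = nvlist_next_nvpair(props, elem)) != NULL)
 *			(void) printf("%s\n", nvpair_name(elem));
 *		nvlist_free(props);
 *	}
 */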

/*
 * Set vdev property
 */
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
    const char *propname, const char *propval)
{
	int ret;
	nvlist_t *nvl = NULL;
	nvlist_t *outnvl = NULL;
	nvlist_t *props;
	nvlist_t *realprops;
	prop_flags_t flags = { 0 };
	uint64_t version;
	uint64_t vdev_guid;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));
	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
		/* Don't leak 'nvl' if the second alloc fails */
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);

	if (nvlist_add_string(props, propname, propval) != 0) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	char errbuf[ERRBUFLEN];
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
	    propname, vdevname, zhp->zpool_name);

	flags.vdevprop = 1;
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(props);
	props = realprops;

	fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);

	ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);

	nvlist_free(props);
	nvlist_free(nvl);
	nvlist_free(outnvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

	return (ret);
}
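
/*
 * Example (illustrative sketch): raise the slow-I/O tolerance of one
 * vdev.  "slow_io_n" is one of the numeric vdev properties handled
 * above; the property value is passed as a string and validated by
 * zpool_valid_proplist() before the ioctl is issued.  The vdev name
 * "sda" is an assumption.
 *
 *	if (zpool_set_vdev_prop(zhp, "sda", "slow_io_n", "10") != 0)
 *		(void) fprintf(stderr, "failed to set vdev property\n");
 */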

/*
 * Prune older entries from the DDT to reclaim space under the quota
 */
int
zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit,
    uint64_t amount)
{
	int error = lzc_ddt_prune(zhp->zpool_name, unit, amount);
	if (error != 0) {
		libzfs_handle_t *hdl = zhp->zpool_hdl;
		char errbuf[ERRBUFLEN];

		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot prune dedup table on '%s'"), zhp->zpool_name);

		if (error == EALREADY) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "a prune operation is already in progress"));
			(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		return (-1);
	}

	return (0);
}
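
/*
 * Example (illustrative sketch): ask the pool to prune up to 10% of the
 * single-reference dedup-table entries, oldest first, mirroring
 * 'zpool ddtprune -p 10'.  The exact enum spelling below is an
 * assumption to check against sys/fs/zfs.h.
 *
 *	if (zpool_ddt_prune(zhp, ZPOOL_DDT_PRUNE_PERCENTAGE, 10) != 0)
 *		(void) fprintf(stderr, "ddt prune failed\n");
 */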